/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/mii.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "mac.h"
#include "spi.h"
#include "falcon.h"
#include "regs.h"
#include "io.h"
#include "mdio_10g.h"
#include "phy.h"
#include "workarounds.h"

/* Falcon hardware control.
 * Falcon is the internal codename for the SFC4000 controller that is
 * present in SFE400X evaluation boards
 */

/**
 * struct falcon_nic_data - Falcon NIC state
 * @pci_dev2: The secondary PCI device if present
 * @i2c_data: Operations and state for I2C bit-bashing algorithm
 */
struct falcon_nic_data {
	struct pci_dev *pci_dev2;
	struct i2c_algo_bit_data i2c_data;
};

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

static int disable_dma_stats;

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 0
#define TX_DC_BASE 0x130000

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 2
#define RX_DC_BASE 0x100000

static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
 * 8 KB, 16-bit address, 32 B write block */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
		     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
		      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
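
/* Decoding the flash type above as a worked example: the size and block
 * fields are log2 values, so SIZE = 17 -> 1 << 17 = 128 KB total,
 * ADDR_LEN = 3 -> 24-bit (3-byte) addressing, ERASE_CMD = 0x52,
 * ERASE_SIZE = 15 -> 1 << 15 = 32 KB erase block, and BLOCK_SIZE = 8 ->
 * 1 << 8 = 256 B write block, matching the comments.
 */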

/* RX FIFO XOFF watermark
 *
 * When the amount of the RX FIFO used increases past this
 * watermark, send XOFF. Only used if RX flow control is enabled
 * (ethtool -A). This also has an effect on RX/TX arbitration.
 */
static int rx_xoff_thresh_bytes = -1;
module_param(rx_xoff_thresh_bytes, int, 0644);
MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");

/* RX FIFO XON watermark
 *
 * When the amount of the RX FIFO used decreases below this
 * watermark, send XON. Only used if TX flow control is enabled
 * (ethtool -A). This also has an effect on RX/TX arbitration.
 */
static int rx_xon_thresh_bytes = -1;
module_param(rx_xon_thresh_bytes, int, 0644);
MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
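
/* Usage sketch (the exact fallback behaviour is an assumption; the
 * defaults are computed elsewhere in this file): both parameters default
 * to -1, which is taken to mean "use the driver's built-in thresholds".
 * They only matter once pause frames are enabled, e.g. with:
 *
 *	ethtool -A eth0 rx on tx on
 */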

/* If FALCON_MAX_INT_ERRORS internal errors occur within
 * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define FALCON_INT_ERROR_EXPIRE 3600
#define FALCON_MAX_INT_ERRORS 5

/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
 */
#define FALCON_FLUSH_INTERVAL 10
#define FALCON_FLUSH_POLL_COUNT 100

/**************************************************************************
 *
 * Falcon constants
 *
 **************************************************************************
 */

/* Size and alignment of special buffers (4KB) */
#define FALCON_BUF_SIZE 4096

/* Dummy SRAM size code */
#define SRM_NB_BSZ_ONCHIP_ONLY (-1)

#define FALCON_IS_DUAL_FUNC(efx)		\
	(falcon_rev(efx) < FALCON_REV_B0)

/**************************************************************************
 *
 * Falcon hardware access
 *
 **************************************************************************/

static inline void falcon_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
					unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *falcon_event(struct efx_channel *channel,
					unsigned int index)
{
	return (((efx_qword_t *) (channel->eventq.addr)) + index);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int falcon_event_present(efx_qword_t *event)
{
	return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		  EFX_DWORD_IS_ALL_ONES(event->dword[1])));
}
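
/* Illustration (not driver code): suppose the NIC is part-way through
 * DMAing a new event when the CPU reads it.  The CPU might observe
 * dword[0] already holding event data while dword[1] is still
 * 0xffffffff.  Checking each dword separately classifies such a
 * half-written event as "not present", whereas a single 64-bit compare
 * against all-ones would wrongly report it as present.
 */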

/**************************************************************************
 *
 * I2C bus - this is a bit-bashing interface using GPIO pins
 * Note that it uses the output enables to tristate the outputs
 * SDA is the data pin and SCL is the clock
 *
 **************************************************************************
 */
static void falcon_setsda(void *data, int state)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
}

static void falcon_setscl(void *data, int state)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
}

static int falcon_getsda(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
}

static int falcon_getscl(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
}

static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
	.setsda		= falcon_setsda,
	.setscl		= falcon_setscl,
	.getsda		= falcon_getsda,
	.getscl		= falcon_getscl,
	.udelay		= 5,
	/* Wait up to 50 ms for slave to let us pull SCL high */
	.timeout	= DIV_ROUND_UP(HZ, 20),
};
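
/* Sketch of how these operations get registered (the real hookup lives
 * in the probe path; the exact statements here are illustrative only):
 *
 *	nic_data->i2c_data = falcon_i2c_bit_operations;
 *	nic_data->i2c_data.data = efx;
 *	efx->i2c_adap.algo_data = &nic_data->i2c_data;
 *	rc = i2c_bit_add_bus(&efx->i2c_adap);
 *
 * i2c_bit_add_bus() is the standard kernel helper that wraps a
 * struct i2c_algo_bit_data in a full i2c_adapter.
 */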

/**************************************************************************
 *
 * Falcon special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a Falcon special buffer
 *
 * This will define a buffer (previously allocated via
 * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
falcon_init_special_buffer(struct efx_nic *efx,
			   struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * 4096);
		EFX_LOG(efx, "mapping special buffer %d at %llx\n",
			index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		falcon_write_buf_tbl(efx, &buf_desc, index);
	}
}
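
/* Worked example: a buffer with index 24 and entries 2 covers 8 KB of
 * host memory.  Buffer table entry 24 holds (dma_addr >> 12) for the
 * first 4 KB page and entry 25 the second, after which the hardware can
 * refer to the pages by buffer ID rather than by raw DMA address.
 */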

/* Unmaps a buffer from Falcon and clears the buffer table entries */
static void
falcon_fini_special_buffer(struct efx_nic *efx,
			   struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	EFX_LOG(efx, "unmapping special buffers %d-%d\n",
		buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new Falcon special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into Falcon's buffer table.
 *
 * This call will allocate 4KB buffers, since Falcon can't use 8KB
 * buffers for event queues and descriptor rings.
 */
static int falcon_alloc_special_buffer(struct efx_nic *efx,
				       struct efx_special_buffer *buffer,
				       unsigned int len)
{
	len = ALIGN(len, FALCON_BUF_SIZE);

	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / FALCON_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (FALCON_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
		"(virt %p phys %llx)\n", buffer->index,
		buffer->index + buffer->entries - 1,
		(u64)buffer->dma_addr, len,
		buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}

static void falcon_free_special_buffer(struct efx_nic *efx,
				       struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
		"(virt %p phys %llx)\n", buffer->index,
		buffer->index + buffer->entries - 1,
		(u64)buffer->dma_addr, buffer->len,
		buffer->addr, (u64)virt_to_phys(buffer->addr));

	pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
			    buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}

/**************************************************************************
 *
 * Falcon generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/

static int falcon_alloc_buffer(struct efx_nic *efx,
			       struct efx_buffer *buffer, unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

static void falcon_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * Falcon TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *falcon_tx_desc(struct efx_tx_queue *tx_queue,
					       unsigned int index)
{
	return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}
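
/* Note: write_count is a free-running counter; masking with EFX_TXQ_MASK
 * (ring size minus one, the size being a power of two) turns it into a
 * ring index.  For example, with a 1024-entry ring, write_count 1030
 * maps to ring entry 6.
 */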


/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void falcon_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
		buffer = &tx_queue->buffer[write_ptr];
		txd = falcon_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */
	falcon_notify_tx_desc(tx_queue);
}

/* Allocate hardware resources for a TX queue */
int falcon_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
		     EFX_TXQ_SIZE & EFX_TXQ_MASK);
	return falcon_alloc_special_buffer(efx, &tx_queue->txd,
					   EFX_TXQ_SIZE * sizeof(efx_qword_t));
}

void falcon_init_tx(struct efx_tx_queue *tx_queue)
{
	efx_oword_t tx_desc_ptr;
	struct efx_nic *efx = tx_queue->efx;

	tx_queue->flushed = false;

	/* Pin TX descriptor ring */
	falcon_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(tx_desc_ptr,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (falcon_rev(efx) >= FALCON_REV_B0) {
		int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (falcon_rev(efx) < FALCON_REV_B0) {
		efx_oword_t reg;

		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}
}

static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void falcon_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* The queue should have been flushed */
	WARN_ON(!tx_queue->flushed);

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	falcon_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void falcon_remove_tx(struct efx_tx_queue *tx_queue)
{
	falcon_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * Falcon RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *falcon_rx_desc(struct efx_rx_queue *rx_queue,
					       unsigned int index)
{
	return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
}

/* This creates an entry in the RX descriptor queue */
static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
					unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = falcon_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		falcon_build_rx_desc(rx_queue,
				     rx_queue->notified_count &
				     EFX_RXQ_MASK);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(rx_queue->efx, &reg,
			FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue);
}

int falcon_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
		     EFX_RXQ_SIZE & EFX_RXQ_MASK);
	return falcon_alloc_special_buffer(efx, &rx_queue->rxd,
					   EFX_RXQ_SIZE * sizeof(efx_qword_t));
}

void falcon_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
	bool iscsi_digest_en = is_b0;

	EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
		rx_queue->queue, rx_queue->rxd.index,
		rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = false;

	/* Pin RX descriptor ring */
	falcon_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      rx_queue->channel->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue,
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 rx_queue->queue);
}

static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue);
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void falcon_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* The queue should already have been flushed */
	WARN_ON(!rx_queue->flushed);

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 rx_queue->queue);

	/* Unpin RX descriptor ring */
	falcon_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void falcon_remove_rx(struct efx_rx_queue *rx_queue)
{
	falcon_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Falcon event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 *
 * Note that EVQ_RPTR_REG contains the index of the "last read" event,
 * whereas channel->eventq_read_ptr contains the index of the "next to
 * read" event.
 */
void falcon_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			    channel->channel);
}

/* Use HW to insert a SW defined event */
void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

/* Handle a transmit completion event
 *
 * Falcon batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static void falcon_handle_tx_event(struct efx_channel *channel,
				   efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];
		channel->irq_mod_score +=
			(tx_ev_desc_ptr - tx_queue->read_count) &
			EFX_TXQ_MASK;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];

		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		falcon_notify_tx_desc(tx_queue);
		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		EFX_ERR(efx, "channel %d unexpected TX event "
			EFX_QWORD_FMT"\n", channel->channel,
			EFX_QWORD_VAL(*event));
	}
}
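
/* Example of the batching: if tx_queue->read_count is 5 and a completion
 * event carries DESC_PTR 10, the single efx_xmit_done() call above
 * releases every descriptor up to index 10, and the masked subtraction
 * feeds the number of newly completed descriptors into the adaptive
 * interrupt moderation score.
 */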

/* Detect errors included in the rx_evt_pkt_ok bit. */
static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				    const efx_qword_t *event,
				    bool *rx_ev_pkt_ok,
				    bool *discard)
{
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_IP_FRAG_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++rx_queue->channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++rx_queue->channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++rx_queue->channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++rx_queue->channel->n_rx_tcp_udp_chksum_err;
	}
	if (rx_ev_ip_frag_err)
		++rx_queue->channel->n_rx_ip_frag_err;

	/* The frame must be discarded if any of these are true. */
	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		    rx_ev_tobe_disc | rx_ev_pause_frm);

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef EFX_ENABLE_DEBUG
	if (rx_ev_other_err) {
		EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
			    EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			    rx_queue->queue, EFX_QWORD_VAL(*event),
			    rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			    rx_ev_ip_hdr_chksum_err ?
			    " [IP_HDR_CHKSUM_ERR]" : "",
			    rx_ev_tcp_udp_chksum_err ?
			    " [TCP_UDP_CHKSUM_ERR]" : "",
			    rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			    rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			    rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			    rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			    rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif
}

/* Handle receive events that are not in-order. */
static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
				       unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & EFX_RXQ_MASK;
	dropped = (index - expected) & EFX_RXQ_MASK;
	EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
		dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}

/* Handle a packet received event
 *
 * Falcon silicon gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void falcon_handle_rx_event(struct efx_channel *channel,
				   const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, discard = false, checksummed;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = &efx->rx_queue[channel->channel];

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IPv4 or
		 * UDP/IPv4, then we can rely on the hardware checksum.
		 */
		checksummed =
			efx->rx_checksum_enabled &&
			(rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP ||
			 rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP);
	} else {
		falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
					&discard);
		checksummed = false;
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match))
			discard = true;
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
		      checksummed, discard);
}

/* Global events are basically PHY events */
static void falcon_handle_global_event(struct efx_channel *channel,
				       efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	bool handled = false;

	if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
		efx->phy_op->clear_interrupt(efx);
		queue_work(efx->workqueue, &efx->phy_work);
		handled = true;
	}

	if ((falcon_rev(efx) >= FALCON_REV_B0) &&
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
		queue_work(efx->workqueue, &efx->mac_work);
		handled = true;
	}

	if (falcon_rev(efx) <= FALCON_REV_A1 ?
	    EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
		EFX_ERR(efx, "channel %d seen global RX_RESET "
			"event. Resetting.\n", channel->channel);

		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
		handled = true;
	}

	if (!handled)
		EFX_ERR(efx, "channel %d unknown global event "
			EFX_QWORD_FMT "\n", channel->channel,
			EFX_QWORD_VAL(*event));
}

static void falcon_handle_driver_event(struct efx_channel *channel,
				       efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		EFX_LOG(efx, "channel %d EVQ %d initialised\n",
			channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		EFX_TRACE(efx, "channel %d SRAM update done\n",
			  channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
			"Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
			" RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
			" TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		EFX_TRACE(efx, "channel %d unknown driver event code %d "
			  "data %04x\n", channel->channel, ev_sub_code,
			  ev_sub_data);
		break;
	}
}

int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
{
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int rx_packets = 0;

	read_ptr = channel->eventq_read_ptr;

	do {
		p_event = falcon_event(channel, read_ptr);
		event = *p_event;

		if (!falcon_event_present(&event))
			/* End of events */
			break;

		EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			falcon_handle_rx_event(channel, &event);
			++rx_packets;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			falcon_handle_tx_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			channel->eventq_magic = EFX_QWORD_FIELD(
				event, FSF_AZ_DRV_GEN_EV_MAGIC);
			EFX_LOG(channel->efx, "channel %d received generated "
				"event "EFX_QWORD_FMT"\n", channel->channel,
				EFX_QWORD_VAL(event));
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			falcon_handle_global_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			falcon_handle_driver_event(channel, &event);
			break;
		default:
			EFX_ERR(channel->efx, "channel %d unknown event type %d"
				" (data " EFX_QWORD_FMT ")\n", channel->channel,
				ev_code, EFX_QWORD_VAL(event));
		}

		/* Increment read pointer */
		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;

	} while (rx_packets < rx_quota);

	channel->eventq_read_ptr = read_ptr;
	return rx_packets;
}
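
/* falcon_process_eventq() is assumed to be called from the channel's
 * NAPI poll routine (see efx.c), with rx_quota derived from the NAPI
 * budget.  That is why only RX events count towards terminating the
 * loop: each RX event may deliver a packet up the stack, while the
 * other event types are cheap to handle.
 */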

void falcon_set_int_moderation(struct efx_channel *channel)
{
	efx_dword_t timer_cmd;
	struct efx_nic *efx = channel->efx;

	/* Set timer register */
	if (channel->irq_moderation) {
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_INT_HLDOFF,
				     FRF_AB_TC_TIMER_VAL,
				     channel->irq_moderation - 1);
	} else {
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_DIS,
				     FRF_AB_TC_TIMER_VAL, 0);
	}
	BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
	efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
			       channel->channel);
}
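
/* Note: at this level channel->irq_moderation is in hardware timer
 * ticks (the register is programmed with irq_moderation - 1); the
 * mapping from the microsecond values exposed via ethtool -C is assumed
 * to be handled by the caller.  TIMER_MODE_INT_HLDOFF holds off further
 * interrupts for the programmed period, TIMER_MODE_DIS disables
 * moderation entirely.
 */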

/* Allocate buffer table entries for event queue */
int falcon_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 ||
		     EFX_EVQ_SIZE & EFX_EVQ_MASK);
	return falcon_alloc_special_buffer(efx, &channel->eventq,
					   EFX_EVQ_SIZE * sizeof(efx_qword_t));
}

void falcon_init_eventq(struct efx_channel *channel)
{
	efx_oword_t evq_ptr;
	struct efx_nic *efx = channel->efx;

	EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
		channel->channel, channel->eventq.index,
		channel->eventq.index + channel->eventq.entries - 1);

	/* Pin event queue buffer */
	falcon_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(evq_ptr,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	falcon_set_int_moderation(channel);
}

void falcon_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t eventq_ptr;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(eventq_ptr);
	efx_writeo_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	/* Unpin event queue */
	falcon_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void falcon_remove_eventq(struct efx_channel *channel)
{
	falcon_free_special_buffer(channel->efx, &channel->eventq);
}


/* Generates a test event on the event queue.  A subsequent call to
 * process_eventq() should pick up the event and place the value of
 * "magic" into channel->eventq_magic.
 */
void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
{
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	falcon_generate_event(channel, &test_event);
}

void falcon_sim_phy_event(struct efx_nic *efx)
{
	efx_qword_t phy_event;

	EFX_POPULATE_QWORD_1(phy_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_GLOBAL_EV);
	if (EFX_IS10G(efx))
		EFX_SET_QWORD_FIELD(phy_event, FSF_AB_GLB_EV_XG_PHY0_INTR, 1);
	else
		EFX_SET_QWORD_FIELD(phy_event, FSF_AB_GLB_EV_G_PHY0_INTR, 1);

	falcon_generate_event(&efx->channel[0], &phy_event);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/


static void falcon_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = &efx->channel[0];
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;

	do {
		efx_qword_t *event = falcon_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!falcon_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
		ev_sub_code = EFX_QWORD_FIELD(*event,
					      FSF_AZ_DRIVER_EV_SUBCODE);
		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   FSF_AZ_DRIVER_EV_SUBDATA);
			if (ev_queue < EFX_TX_QUEUE_COUNT) {
				tx_queue = efx->tx_queue + ev_queue;
				tx_queue->flushed = true;
			}
		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_queues) {
				rx_queue = efx->rx_queue + ev_queue;

				/* retry the rx flush */
				if (ev_failed)
					falcon_flush_rx_queue(rx_queue);
				else
					rx_queue->flushed = true;
			}
		}

		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
	} while (read_ptr != end_ptr);
}

/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them */
int falcon_flush_queues(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i;
	bool outstanding;

	/* Issue flush requests */
	efx_for_each_tx_queue(tx_queue, efx) {
		tx_queue->flushed = false;
		falcon_flush_tx_queue(tx_queue);
	}
	efx_for_each_rx_queue(rx_queue, efx) {
		rx_queue->flushed = false;
		falcon_flush_rx_queue(rx_queue);
	}

	/* Poll the evq looking for flush completions. Since we're not pushing
	 * any more rx or tx descriptors at this point, we're in no danger of
	 * overflowing the evq whilst we wait */
	for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
		msleep(FALCON_FLUSH_INTERVAL);
		falcon_poll_flush_events(efx);

		/* Check if every queue has been successfully flushed */
		outstanding = false;
		efx_for_each_tx_queue(tx_queue, efx)
			outstanding |= !tx_queue->flushed;
		efx_for_each_rx_queue(rx_queue, efx)
			outstanding |= !rx_queue->flushed;
		if (!outstanding)
			return 0;
	}

	/* Mark the queues as all flushed. We're going to return failure
	 * leading to a reset, or fake up success anyway. "flushed" now
	 * indicates that we tried to flush. */
	efx_for_each_tx_queue(tx_queue, efx) {
		if (!tx_queue->flushed)
			EFX_ERR(efx, "tx queue %d flush command timed out\n",
				tx_queue->queue);
		tx_queue->flushed = true;
	}
	efx_for_each_rx_queue(rx_queue, efx) {
		if (!rx_queue->flushed)
			EFX_ERR(efx, "rx queue %d flush command timed out\n",
				rx_queue->queue);
		rx_queue->flushed = true;
	}

	if (EFX_WORKAROUND_7803(efx))
		return 0;

	return -ETIMEDOUT;
}

/**************************************************************************
 *
 * Falcon hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate Falcon interrupts */
static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
				     int force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_2(int_en_reg_ker,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void falcon_enable_interrupts(struct efx_nic *efx)
{
	efx_oword_t int_adr_reg_ker;
	struct efx_channel *channel;

	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Program address */
	EFX_POPULATE_OWORD_2(int_adr_reg_ker,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &int_adr_reg_ker, FR_AZ_INT_ADR_KER);

	/* Enable interrupts */
	falcon_interrupts(efx, 1, 0);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	   date */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

void falcon_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	falcon_interrupts(efx, 0, 0);
}

/* Generate a Falcon test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void falcon_generate_interrupt(struct efx_nic *efx)
{
	falcon_interrupts(efx, 1, 1);
}

/* Acknowledge a legacy interrupt from Falcon
 *
 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
 *
 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
 * BIU. Interrupt acknowledge is read sensitive so must write instead
 * (then read to ensure the BIU collector is flushed)
 *
 * NB most hardware supports MSI interrupts
 */
static inline void falcon_irq_ack_a1(struct efx_nic *efx)
{
	efx_dword_t reg;

	EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
	efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
	efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
		EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		EFX_OWORD_VAL(fatal_intr),
		error ? "disabling bus mastering" : "no recognised error");
	if (error == 0)
		goto out;

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER);
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
			EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (FALCON_IS_DUAL_FUNC(efx))
		pci_clear_master(nic_data->pci_dev2);
	falcon_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + FALCON_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < FALCON_MAX_INT_ERRORS) {
		EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen."
			"NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}
out:
	return IRQ_HANDLED;
}

/* Handle a legacy interrupt from Falcon
 * Acknowledges the interrupt and schedule event queue processing.
 */
static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Schedule processing of any interrupting queues */
	efx_for_each_channel(channel, efx) {
		if ((queues & 1) ||
		    falcon_event_present(
			    falcon_event(channel, channel->eventq_read_ptr))) {
			efx_schedule_channel(channel);
			result = IRQ_HANDLED;
		}
		queues >>= 1;
	}

	if (result == IRQ_HANDLED) {
		efx->last_irq_cpu = raw_smp_processor_id();
		EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			  irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
	}

	return result;
}


static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	struct efx_channel *channel;
	int syserr;
	int queues;

	/* Check to see if this is our interrupt.  If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
		EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq,
			  raw_smp_processor_id());
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
	BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
	queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
	EFX_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

	/* Schedule processing of any interrupting queues */
	channel = &efx->channel[0];
	while (queues) {
		if (queues & 0x01)
			efx_schedule_channel(channel);
		channel++;
		queues >>= 1;
	}

	return IRQ_HANDLED;
}

/* Handle an MSI interrupt from Falcon
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	efx->last_irq_cpu = raw_smp_processor_id();
	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Schedule processing of the channel */
	efx_schedule_channel(channel);

	return IRQ_HANDLED;
}


/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
static void falcon_setup_rss_indir_table(struct efx_nic *efx)
{
	int i = 0;
	unsigned long offset;
	efx_dword_t dword;

	if (falcon_rev(efx) < FALCON_REV_B0)
		return;

	for (offset = FR_BZ_RX_INDIRECTION_TBL;
	     offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
	     offset += 0x10) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     i % efx->n_rx_queues);
		efx_writed(efx, &dword, offset);
		i++;
	}
}
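
/* Worked example: the loop above writes 0x800 / 0x10 = 128 indirection
 * entries.  With, say, four RX queues the table contents are
 * 0,1,2,3,0,1,... so receive hash values are spread round-robin across
 * the queues.
 */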

/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int falcon_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (falcon_rev(efx) >= FALCON_REV_B0)
			handler = falcon_legacy_interrupt_b0;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
				efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, falcon_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 channel->name, channel);
		if (rc) {
			EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
	}

	return 0;

 fail2:
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, channel);
 fail1:
	return rc;
}

void falcon_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, channel);
	}

	/* ACK legacy interrupt */
	if (falcon_rev(efx) >= FALCON_REV_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}

/**************************************************************************
 *
 * EEPROM/flash
 *
 **************************************************************************
 */

#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)

static int falcon_spi_poll(struct efx_nic *efx)
{
	efx_oword_t reg;
	efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
	return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
}

/* Wait for SPI command completion */
static int falcon_spi_wait(struct efx_nic *efx)
{
	/* Most commands will finish quickly, so we start polling at
	 * very short intervals.  Sometimes the command may have to
	 * wait for VPD or expansion ROM access outside of our
	 * control, so we allow up to 100 ms. */
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
	int i;

	for (i = 0; i < 10; i++) {
		if (!falcon_spi_poll(efx))
			return 0;
		udelay(10);
	}

	for (;;) {
		if (!falcon_spi_poll(efx))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			EFX_ERR(efx, "timed out waiting for SPI\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}

int falcon_spi_cmd(const struct efx_spi_device *spi,
		   unsigned int command, int address,
		   const void *in, void *out, size_t len)
{
	struct efx_nic *efx = spi->efx;
	bool addressed = (address >= 0);
	bool reading = (out != NULL);
	efx_oword_t reg;
	int rc;

	/* Input validation */
	if (len > FALCON_SPI_MAX_LEN)
		return -EINVAL;
	BUG_ON(!mutex_is_locked(&efx->spi_lock));

	/* Check that previous command is not still running */
	rc = falcon_spi_poll(efx);
	if (rc)
		return rc;

	/* Program address register, if we have an address */
	if (addressed) {
		EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
	}

	/* Program data register, if we have data */
	if (in != NULL) {
		memcpy(&reg, in, len);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
	}

	/* Issue read/write command */
	EFX_POPULATE_OWORD_7(reg,
			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
			     FRF_AB_EE_SPI_HCMD_READ, reading,
			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
			     FRF_AB_EE_SPI_HCMD_ADBCNT,
			     (addressed ? spi->addr_len : 0),
			     FRF_AB_EE_SPI_HCMD_ENC, command);
	efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);

	/* Wait for read/write to complete */
	rc = falcon_spi_wait(efx);
	if (rc)
		return rc;

	/* Read data */
	if (out != NULL) {
		efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
		memcpy(out, &reg, len);
	}

	return 0;
}

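/* Limit a write so that it does not cross an SPI write-block boundary */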
static size_t
falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
{
	return min(FALCON_SPI_MAX_LEN,
		   (spi->block_size - (start & (spi->block_size - 1))));
}

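/* Fold address bit 8 into bit 3 of the command byte for devices that
 * need it (spi->munge_address set); other devices get the command
 * unchanged */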
static inline u8
efx_spi_munge_command(const struct efx_spi_device *spi,
		      const u8 command, const unsigned int address)
{
	return command | (((address >> 8) & spi->munge_address) << 3);
}

/* Wait up to 10 ms for buffered write completion */
int falcon_spi_wait_write(const struct efx_spi_device *spi)
{
	struct efx_nic *efx = spi->efx;
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
	u8 status;
	int rc;

	for (;;) {
		rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			EFX_ERR(efx, "SPI write timeout on device %d"
				" last status=0x%02x\n",
				spi->device_id, status);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}

int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
		    size_t len, size_t *retlen, u8 *buffer)
{
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		block_len = min(len - pos, FALCON_SPI_MAX_LEN);

		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos, NULL,
				    buffer + pos, block_len);
		if (rc)
			break;
		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}

int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
		     size_t len, size_t *retlen, const u8 *buffer)
{
	u8 verify_buffer[FALCON_SPI_MAX_LEN];
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0);
		if (rc)
			break;

		block_len = min(len - pos,
				falcon_spi_write_limit(spi, start + pos));
		command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos,
				    buffer + pos, NULL, block_len);
		if (rc)
			break;

		rc = falcon_spi_wait_write(spi);
		if (rc)
			break;

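		/* Read back and verify the block we just wrote */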
		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos,
				    NULL, verify_buffer, block_len);
		if (memcmp(verify_buffer, buffer + pos, block_len)) {
			rc = -EIO;
			break;
		}

		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}

/**************************************************************************
 *
 * MAC wrapper
 *
 **************************************************************************
 */

static int falcon_reset_macs(struct efx_nic *efx)
{
	efx_oword_t reg;
	int count;

	if (falcon_rev(efx) < FALCON_REV_B0) {
		/* It's not safe to use GLB_CTL_REG to reset the
		 * macs, so instead use the internal MAC resets
		 */
		if (!EFX_IS10G(efx)) {
			EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 1);
			efx_writeo(efx, &reg, FR_AB_GM_CFG1);
			udelay(1000);

			EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 0);
			efx_writeo(efx, &reg, FR_AB_GM_CFG1);
			udelay(1000);
			return 0;
		} else {
			EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
			efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

			for (count = 0; count < 10000; count++) {
				efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
				if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
				    0)
					return 0;
				udelay(10);
			}

			EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
			return -ETIMEDOUT;
		}
	}

	/* MAC stats will fail whilst the TX fifo is draining. Serialise
	 * the drain sequence with the statistics fetch */
	efx_stats_disable(efx);

	efx_reado(efx, &reg, FR_AB_MAC_CTRL);
	EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, 1);
	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);

	efx_reado(efx, &reg, FR_AB_GLB_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
	efx_writeo(efx, &reg, FR_AB_GLB_CTL);

	count = 0;
	while (1) {
		efx_reado(efx, &reg, FR_AB_GLB_CTL);
		if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
			EFX_LOG(efx, "Completed MAC reset after %d loops\n",
				count);
			break;
		}
		if (count > 20) {
			EFX_ERR(efx, "MAC reset failed\n");
			break;
		}
		count++;
		udelay(10);
	}

	efx_stats_enable(efx);

	/* If we've reset the EM block and the link is up, then
	 * we'll have to kick the XAUI link so the PHY can recover */
	if (efx->link_up && EFX_IS10G(efx) && EFX_WORKAROUND_5147(efx))
		falcon_reset_xaui(efx);

	return 0;
}

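/* Drain the TX FIFO by resetting the MACs; only meaningful on B0 and
 * later, and skipped in loopback modes */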
void falcon_drain_tx_fifo(struct efx_nic *efx)
{
	efx_oword_t reg;

	if ((falcon_rev(efx) < FALCON_REV_B0) ||
	    (efx->loopback_mode != LOOPBACK_NONE))
		return;

	efx_reado(efx, &reg, FR_AB_MAC_CTRL);
	/* There is no point in draining more than once */
	if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
		return;

	falcon_reset_macs(efx);
}

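/* Isolate the MAC from the RX datapath prior to reconfiguration */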
void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
{
	efx_oword_t reg;

	if (falcon_rev(efx) < FALCON_REV_B0)
		return;

	/* Isolate the MAC -> RX */
	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);

	if (!efx->link_up)
		falcon_drain_tx_fifo(efx);
}

void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
	efx_oword_t reg;
	int link_speed;
	bool tx_fc;

	switch (efx->link_speed) {
	case 10000: link_speed = 3; break;
	case 1000:  link_speed = 2; break;
	case 100:   link_speed = 1; break;
	default:    link_speed = 0; break;
	}
	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
	 * as advertised.  Disable to ensure packets are not
	 * indefinitely held and TX queue can be flushed at any point
	 * while the link is down. */
	EFX_POPULATE_OWORD_5(reg,
			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
			     FRF_AB_MAC_BCAD_ACPT, 1,
			     FRF_AB_MAC_UC_PROM, efx->promiscuous,
			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
			     FRF_AB_MAC_SPEED, link_speed);
	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded. */
	if (falcon_rev(efx) >= FALCON_REV_B0) {
		EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
				    !efx->link_up);
	}

	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);

	/* Restore the multicast hash registers. */
	falcon_set_multicast_hash(efx);

	/* Transmission of pause frames when RX crosses the threshold is
	 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
	 * Action on receipt of pause frames is controlled by XM_DIS_FCNTL */
	tx_fc = !!(efx->link_fc & EFX_FC_TX);
	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, tx_fc);

	/* Unisolate the MAC -> RX */
	if (falcon_rev(efx) >= FALCON_REV_B0)
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}

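/* Kick off a MAC statistics DMA and poll for the completion marker at
 * done_offset in the stats buffer to change to FALCON_STATS_DONE */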
int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
{
	efx_oword_t reg;
	u32 *dma_done;
	int i;

	if (disable_dma_stats)
		return 0;

	/* Statistics fetch will fail if the MAC is in TX drain */
	if (falcon_rev(efx) >= FALCON_REV_B0) {
		efx_oword_t temp;
		efx_reado(efx, &temp, FR_AB_MAC_CTRL);
		if (EFX_OWORD_FIELD(temp, FRF_BB_TXFIFO_DRAIN_EN))
			return 0;
	}

	dma_done = (efx->stats_buffer.addr + done_offset);
	*dma_done = FALCON_STATS_NOT_DONE;
	wmb(); /* ensure done flag is clear */

	/* Initiate DMA transfer of stats */
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_MAC_STAT_DMA_CMD, 1,
			     FRF_AB_MAC_STAT_DMA_ADR,
			     efx->stats_buffer.dma_addr);
	efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);

	/* Wait for transfer to complete */
	for (i = 0; i < 400; i++) {
		if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) {
			rmb(); /* Ensure the stats are valid. */
			return 0;
		}
		udelay(10);
	}

	EFX_ERR(efx, "timed out waiting for statistics\n");
	return -ETIMEDOUT;
}

/**************************************************************************
 *
 * PHY access via GMII
 *
 **************************************************************************
 */

/* Wait for GMII access to complete */
static int falcon_gmii_wait(struct efx_nic *efx)
{
	efx_dword_t md_stat;
	int count;

	/* wait up to 50ms - taken max from datasheet */
	for (count = 0; count < 5000; count++) {
		efx_readd(efx, &md_stat, FR_AB_MD_STAT);
		if (EFX_DWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
			if (EFX_DWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
			    EFX_DWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
				EFX_ERR(efx, "error from GMII access "
					EFX_DWORD_FMT"\n",
					EFX_DWORD_VAL(md_stat));
				return -EIO;
			}
			return 0;
		}
		udelay(10);
	}
	EFX_ERR(efx, "timed out waiting for GMII\n");
	return -ETIMEDOUT;
}

/* Write an MDIO register of a PHY connected to Falcon. */
static int falcon_mdio_write(struct net_device *net_dev,
			     int prtad, int devad, u16 addr, u16 value)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	efx_oword_t reg;
	int rc;

	EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n",
		    prtad, devad, addr, value);

	spin_lock_bh(&efx->phy_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the address/ID register */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	efx_writeo(efx, &reg, FR_AB_MD_ID);

	/* Write data */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
	efx_writeo(efx, &reg, FR_AB_MD_TXD);

	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_MD_WRC, 1,
			     FRF_AB_MD_GC, 0);
	efx_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to be written */
	rc = falcon_gmii_wait(efx);
	if (rc) {
		/* Abort the write operation */
		EFX_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_WRC, 0,
				     FRF_AB_MD_GC, 1);
		efx_writeo(efx, &reg, FR_AB_MD_CS);
		udelay(10);
	}

 out:
	spin_unlock_bh(&efx->phy_lock);
	return rc;
}

/* Read an MDIO register of a PHY connected to Falcon. */
static int falcon_mdio_read(struct net_device *net_dev,
			    int prtad, int devad, u16 addr)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	efx_oword_t reg;
	int rc;

	spin_lock_bh(&efx->phy_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	efx_writeo(efx, &reg, FR_AB_MD_ID);

	/* Request data to be read */
	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
	efx_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to become available */
	rc = falcon_gmii_wait(efx);
	if (rc == 0) {
		efx_reado(efx, &reg, FR_AB_MD_RXD);
		rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
		EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n",
			    prtad, devad, addr, rc);
	} else {
		/* Abort the read operation */
		EFX_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_RIC, 0,
				     FRF_AB_MD_GC, 1);
		efx_writeo(efx, &reg, FR_AB_MD_CS);

		EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n",
			prtad, devad, addr, rc);
	}

 out:
	spin_unlock_bh(&efx->phy_lock);
	return rc;
}

int falcon_switch_mac(struct efx_nic *efx)
{
	struct efx_mac_operations *old_mac_op = efx->mac_op;
	efx_oword_t nic_stat;
	unsigned strap_val;
	int rc = 0;

	/* Don't try to fetch MAC stats while we're switching MACs */
	efx_stats_disable(efx);

	/* Internal loopbacks override the phy speed setting */
	if (efx->loopback_mode == LOOPBACK_GMAC) {
		efx->link_speed = 1000;
		efx->link_fd = true;
	} else if (LOOPBACK_INTERNAL(efx)) {
		efx->link_speed = 10000;
		efx->link_fd = true;
	}

	WARN_ON(!mutex_is_locked(&efx->mac_lock));
	efx->mac_op = (EFX_IS10G(efx) ?
		       &falcon_xmac_operations : &falcon_gmac_operations);

	/* Always push the NIC_STAT_REG setting even if the mac hasn't
	 * changed, because this function is run post online reset */
	efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
	strap_val = EFX_IS10G(efx) ? 5 : 3;
	if (falcon_rev(efx) >= FALCON_REV_B0) {
		EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1);
		EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val);
		efx_writeo(efx, &nic_stat, FR_AB_NIC_STAT);
	} else {
		/* Falcon A1 does not support 1G/10G speed switching
		 * and must not be used with a PHY that does. */
		BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) !=
		       strap_val);
	}

	if (old_mac_op == efx->mac_op)
		goto out;

	EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
	/* Not all macs support a mac-level link state */
	efx->mac_up = true;

	rc = falcon_reset_macs(efx);
out:
	efx_stats_enable(efx);
	return rc;
}

/* This call is responsible for hooking in the MAC and PHY operations */
int falcon_probe_port(struct efx_nic *efx)
{
	int rc;

	switch (efx->phy_type) {
	case PHY_TYPE_SFX7101:
		efx->phy_op = &falcon_sfx7101_phy_ops;
		break;
	case PHY_TYPE_SFT9001A:
	case PHY_TYPE_SFT9001B:
		efx->phy_op = &falcon_sft9001_phy_ops;
		break;
	case PHY_TYPE_QT2022C2:
	case PHY_TYPE_QT2025C:
		efx->phy_op = &falcon_qt202x_phy_ops;
		break;
	default:
		EFX_ERR(efx, "Unknown PHY type %d\n",
			efx->phy_type);
		return -ENODEV;
	}

	if (efx->phy_op->macs & EFX_XMAC)
		efx->loopback_modes |= ((1 << LOOPBACK_XGMII) |
					(1 << LOOPBACK_XGXS) |
					(1 << LOOPBACK_XAUI));
	if (efx->phy_op->macs & EFX_GMAC)
		efx->loopback_modes |= (1 << LOOPBACK_GMAC);
	efx->loopback_modes |= efx->phy_op->loopbacks;

	/* Set up MDIO structure for PHY */
	efx->mdio.mmds = efx->phy_op->mmds;
	efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	efx->mdio.mdio_read = falcon_mdio_read;
	efx->mdio.mdio_write = falcon_mdio_write;

	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
	if (falcon_rev(efx) >= FALCON_REV_B0)
		efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
	else
		efx->wanted_fc = EFX_FC_RX;

	/* Allocate buffer for stats */
	rc = falcon_alloc_buffer(efx, &efx->stats_buffer,
				 FALCON_MAC_STATS_SIZE);
	if (rc)
		return rc;
	EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
		(u64)efx->stats_buffer.dma_addr,
		efx->stats_buffer.addr,
		(u64)virt_to_phys(efx->stats_buffer.addr));

	return 0;
}

void falcon_remove_port(struct efx_nic *efx)
{
	falcon_free_buffer(efx, &efx->stats_buffer);
}

/**************************************************************************
 *
 * Multicast filtering
 *
 **************************************************************************
 */

void falcon_set_multicast_hash(struct efx_nic *efx)
{
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;

	/* Broadcast packets go through the multicast hash filter.
	 * ether_crc_le() of the broadcast address is 0xbe2612ff
	 * so we always add bit 0xff to the mask.
	 */
	set_bit_le(0xff, mc_hash->byte);

	efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
	efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
}


/**************************************************************************
 *
 * Falcon test code
 *
 **************************************************************************/

int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
	struct falcon_nvconfig *nvconfig;
	struct efx_spi_device *spi;
	void *region;
	int rc, magic_num, struct_ver;
	__le16 *word, *limit;
	u32 csum;

	spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
	if (!spi)
		return -EINVAL;

	region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
	if (!region)
		return -ENOMEM;
	nvconfig = region + FALCON_NVCONFIG_OFFSET;

	mutex_lock(&efx->spi_lock);
	rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region);
	mutex_unlock(&efx->spi_lock);
	if (rc) {
		EFX_ERR(efx, "Failed to read %s\n",
			efx->spi_flash ? "flash" : "EEPROM");
		rc = -EIO;
		goto out;
	}

	magic_num = le16_to_cpu(nvconfig->board_magic_num);
	struct_ver = le16_to_cpu(nvconfig->board_struct_ver);

	rc = -EINVAL;
	if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
		EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
		goto out;
	}
	if (struct_ver < 2) {
		EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", struct_ver);
		goto out;
	} else if (struct_ver < 4) {
		word = &nvconfig->board_magic_num;
		limit = (__le16 *) (nvconfig + 1);
	} else {
		word = region;
2361
		limit = region + FALCON_NVCONFIG_END;
B
Ben Hutchings 已提交
2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384
	}
	for (csum = 0; word < limit; ++word)
		csum += le16_to_cpu(*word);

	if (~csum & 0xffff) {
		EFX_ERR(efx, "NVRAM has incorrect checksum\n");
		goto out;
	}

	rc = 0;
	if (nvconfig_out)
		memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));

 out:
	kfree(region);
	return rc;
}

/* Registers tested in the falcon register test */
static struct {
	unsigned address;
	efx_oword_t mask;
} efx_test_registers[] = {
	{ FR_AZ_ADR_REGION,
	  EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
	{ FR_AZ_RX_CFG,
	  EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_CFG,
	  EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_RESERVED,
	  EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
	{ FR_AB_MAC_CTRL,
	  EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_SRM_TX_DC_CFG,
	  EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_CFG,
	  EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_PF_WM,
	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_BZ_DP_CTRL,
	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GM_CFG2,
	  EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GMF_CFG0,
	  EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_GLB_CFG,
	  EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_TX_CFG,
	  EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_CFG,
	  EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_PARAM,
	  EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_FC,
	  EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_ADR_LO,
	  EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XX_SD_CTL,
	  EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};

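/* Return true if two owords differ in any bit covered by the mask */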
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

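/* Sweep every testable bit of each register on and off in isolation,
 * checking that it reads back correctly; the original register value
 * is restored afterwards */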
int falcon_test_registers(struct efx_nic *efx)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < ARRAY_SIZE(efx_test_registers); ++i) {
		address = efx_test_registers[i].address;
		mask = imask = efx_test_registers[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		" at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Device reset
 *
 **************************************************************************
 */

/* Resets NIC to known state.  This routine must be called in process
 * context and is allowed to sleep. */
int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t glb_ctl_reg_ker;
	int rc;

	EFX_LOG(efx, "performing hardware reset (%d)\n", method);

	/* Initiate device reset */
	if (method == RESET_TYPE_WORLD) {
		rc = pci_save_state(efx->pci_dev);
		if (rc) {
			EFX_ERR(efx, "failed to backup PCI state of primary "
				"function prior to hardware reset\n");
			goto fail1;
		}
		if (FALCON_IS_DUAL_FUNC(efx)) {
			rc = pci_save_state(nic_data->pci_dev2);
			if (rc) {
				EFX_ERR(efx, "failed to backup PCI state of "
					"secondary function prior to "
					"hardware reset\n");
				goto fail2;
			}
		}

		EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	} else {
		EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
				     /* exclude PHY from "invisible" reset */
				     FRF_AB_EXT_PHY_RST_CTL,
				     method == RESET_TYPE_INVISIBLE,
				     /* exclude EEPROM/flash and PCIe */
				     FRF_AB_PCIE_CORE_RST_CTL, 1,
				     FRF_AB_PCIE_NSTKY_RST_CTL, 1,
				     FRF_AB_PCIE_SD_RST_CTL, 1,
				     FRF_AB_EE_RST_CTL, 1,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	}
	efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);

	EFX_LOG(efx, "waiting for hardware reset\n");
	schedule_timeout_uninterruptible(HZ / 20);

	/* Restore PCI configuration if needed */
	if (method == RESET_TYPE_WORLD) {
		if (FALCON_IS_DUAL_FUNC(efx)) {
			rc = pci_restore_state(nic_data->pci_dev2);
			if (rc) {
				EFX_ERR(efx, "failed to restore PCI config for "
					"the secondary function\n");
				goto fail3;
			}
		}
		rc = pci_restore_state(efx->pci_dev);
		if (rc) {
			EFX_ERR(efx, "failed to restore PCI config for the "
				"primary function\n");
			goto fail4;
		}
		EFX_LOG(efx, "successfully restored PCI config\n");
	}

	/* Assert that reset complete */
	efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
	if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
		rc = -ETIMEDOUT;
		EFX_ERR(efx, "timed out waiting for hardware reset\n");
		goto fail5;
	}
	EFX_LOG(efx, "hardware reset complete\n");

	return 0;

	/* pci_save_state() and pci_restore_state() MUST be called in pairs */
fail2:
fail3:
	pci_restore_state(efx->pci_dev);
fail1:
fail4:
fail5:
	return rc;
}

/* Zeroes out the SRAM contents.  This routine must be called in
 * process context and is allowed to sleep.
 */
static int falcon_reset_sram(struct efx_nic *efx)
{
	efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
	int count;

	/* Set the SRAM wake/sleep GPIO appropriately. */
	efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
	efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);

	/* Initiate SRAM reset */
	EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
			     FRF_AZ_SRM_INIT_EN, 1,
			     FRF_AZ_SRM_NB_SZ, 0);
	efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);

	/* Wait for SRAM reset to complete */
	count = 0;
	do {
		EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count);

		/* SRAM reset is slow; expect around 16ms */
		schedule_timeout_uninterruptible(HZ / 50);

		/* Check for reset complete */
		efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
		if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
			EFX_LOG(efx, "SRAM reset complete\n");

			return 0;
		}
	} while (++count < 20);	/* wait up to 0.4 sec */

	EFX_ERR(efx, "timed out waiting for SRAM reset\n");
	return -ETIMEDOUT;
}

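/* Decode a SPI_DEV_TYPE_* descriptor word and (re)allocate the
 * corresponding efx_spi_device; a device_type of 0 just clears the
 * pointer */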
static int falcon_spi_device_init(struct efx_nic *efx,
				  struct efx_spi_device **spi_device_ret,
				  unsigned int device_id, u32 device_type)
{
	struct efx_spi_device *spi_device;

	if (device_type != 0) {
		spi_device = kzalloc(sizeof(*spi_device), GFP_KERNEL);
		if (!spi_device)
			return -ENOMEM;
		spi_device->device_id = device_id;
		spi_device->size =
			1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
		spi_device->addr_len =
			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
		spi_device->munge_address = (spi_device->size == 1 << 9 &&
					     spi_device->addr_len == 1);
		spi_device->erase_command =
			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
		spi_device->erase_size =
			1 << SPI_DEV_TYPE_FIELD(device_type,
						SPI_DEV_TYPE_ERASE_SIZE);
		spi_device->block_size =
			1 << SPI_DEV_TYPE_FIELD(device_type,
						SPI_DEV_TYPE_BLOCK_SIZE);

		spi_device->efx = efx;
	} else {
		spi_device = NULL;
	}

	kfree(*spi_device_ret);
	*spi_device_ret = spi_device;
	return 0;
}


static void falcon_remove_spi_devices(struct efx_nic *efx)
{
	kfree(efx->spi_eeprom);
	efx->spi_eeprom = NULL;
	kfree(efx->spi_flash);
	efx->spi_flash = NULL;
}

/* Extract non-volatile configuration */
static int falcon_probe_nvconfig(struct efx_nic *efx)
{
	struct falcon_nvconfig *nvconfig;
	int board_rev;
	int rc;

	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
	if (!nvconfig)
		return -ENOMEM;

	rc = falcon_read_nvram(efx, nvconfig);
	if (rc == -EINVAL) {
		EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
		efx->phy_type = PHY_TYPE_NONE;
		efx->mdio.prtad = MDIO_PRTAD_NONE;
		board_rev = 0;
		rc = 0;
	} else if (rc) {
		goto fail1;
	} else {
		struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
		struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;

		efx->phy_type = v2->port0_phy_type;
		efx->mdio.prtad = v2->port0_phy_addr;
		board_rev = le16_to_cpu(v2->board_revision);

		if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
			rc = falcon_spi_device_init(
				efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
				le32_to_cpu(v3->spi_device_type
					    [FFE_AB_SPI_DEVICE_FLASH]));
			if (rc)
				goto fail2;
			rc = falcon_spi_device_init(
				efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
				le32_to_cpu(v3->spi_device_type
					    [FFE_AB_SPI_DEVICE_EEPROM]));
			if (rc)
				goto fail2;
		}
	}

	/* Read the MAC addresses */
	memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);

	EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);

	falcon_probe_board(efx, board_rev);

	kfree(nvconfig);
	return 0;

 fail2:
	falcon_remove_spi_devices(efx);
 fail1:
	kfree(nvconfig);
	return rc;
}

/* Probe the NIC variant (revision, ASIC vs FPGA, function count, port
 * count, port speed).  Set workaround and feature flags accordingly.
 */
static int falcon_probe_nic_variant(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_oword_t nic_stat;

	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	if (EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER)) {
		EFX_ERR(efx, "Falcon FPGA not supported\n");
		return -ENODEV;
	}

	efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);

	switch (falcon_rev(efx)) {
	case FALCON_REV_A0:
	case 0xff:
		EFX_ERR(efx, "Falcon rev A0 not supported\n");
		return -ENODEV;

	case FALCON_REV_A1:
		if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
			EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
			return -ENODEV;
		}
		break;

	case FALCON_REV_B0:
		break;

	default:
		EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
		return -ENODEV;
	}

	/* Initial assumed speed */
	efx->link_speed = EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) ? 10000 : 1000;

	return 0;
}

/* Probe all SPI devices on the NIC */
static void falcon_probe_spi_devices(struct efx_nic *efx)
{
	efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
	int boot_dev;

	efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
	efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
	efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);

	if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
		boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
			    FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
		EFX_LOG(efx, "Booted from %s\n",
			boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM");
	} else {
		/* Disable VPD and set clock dividers to safe
		 * values for initial programming. */
		boot_dev = -1;
		EFX_LOG(efx, "Booted from internal ASIC settings;"
			" setting SPI config\n");
		EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
				     /* 125 MHz / 7 ~= 20 MHz */
				     FRF_AB_EE_SF_CLOCK_DIV, 7,
				     /* 125 MHz / 63 ~= 2 MHz */
				     FRF_AB_EE_EE_CLOCK_DIV, 63);
		efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
	}

	if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
		falcon_spi_device_init(efx, &efx->spi_flash,
				       FFE_AB_SPI_DEVICE_FLASH,
				       default_flash_type);
	if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
		falcon_spi_device_init(efx, &efx->spi_eeprom,
				       FFE_AB_SPI_DEVICE_EEPROM,
				       large_eeprom_type);
}

int falcon_probe_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data;
	int rc;

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	/* Determine number of ports etc. */
	rc = falcon_probe_nic_variant(efx);
	if (rc)
		goto fail1;

	/* Probe secondary function if expected */
	if (FALCON_IS_DUAL_FUNC(efx)) {
		struct pci_dev *dev = pci_dev_get(efx->pci_dev);

		while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
					     dev))) {
			if (dev->bus == efx->pci_dev->bus &&
			    dev->devfn == efx->pci_dev->devfn + 1) {
				nic_data->pci_dev2 = dev;
				break;
			}
		}
		if (!nic_data->pci_dev2) {
			EFX_ERR(efx, "failed to find secondary function\n");
			rc = -ENODEV;
			goto fail2;
		}
	}

	/* Now we can reset the NIC */
	rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
	if (rc) {
		EFX_ERR(efx, "failed to reset NIC\n");
		goto fail3;
	}

	/* Allocate memory for INT_KER */
	rc = falcon_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
	if (rc)
		goto fail4;
	BUG_ON(efx->irq_status.dma_addr & 0x0f);

	EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
		(u64)efx->irq_status.dma_addr,
		efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr));

	falcon_probe_spi_devices(efx);

	/* Read in the non-volatile configuration */
	rc = falcon_probe_nvconfig(efx);
	if (rc)
		goto fail5;

	/* Initialise I2C adapter */
	efx->i2c_adap.owner = THIS_MODULE;
	nic_data->i2c_data = falcon_i2c_bit_operations;
	nic_data->i2c_data.data = efx;
	efx->i2c_adap.algo_data = &nic_data->i2c_data;
	efx->i2c_adap.dev.parent = &efx->pci_dev->dev;
	strlcpy(efx->i2c_adap.name, "SFC4000 GPIO", sizeof(efx->i2c_adap.name));
	rc = i2c_bit_add_bus(&efx->i2c_adap);
	if (rc)
		goto fail5;

	rc = falcon_board(efx)->init(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise board\n");
		goto fail6;
	}

	return 0;

 fail6:
	BUG_ON(i2c_del_adapter(&efx->i2c_adap));
	memset(&efx->i2c_adap, 0, sizeof(efx->i2c_adap));
 fail5:
	falcon_remove_spi_devices(efx);
	falcon_free_buffer(efx, &efx->irq_status);
 fail4:
 fail3:
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}
 fail2:
 fail1:
	kfree(efx->nic_data);
	return rc;
}

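/* Program the RX_CFG register: user buffer sizing and the XON/XOFF
 * watermarks for the control and data FIFOs */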
static void falcon_init_rx_cfg(struct efx_nic *efx)
{
	/* Prior to Siena the RX DMA engine will split each frame at
	 * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
	 * be so large that that never happens. */
	const unsigned huge_buf_size = (3 * 4096) >> 5;
	/* RX control FIFO thresholds (32 entries) */
	const unsigned ctrl_xon_thr = 20;
	const unsigned ctrl_xoff_thr = 25;
	/* RX data FIFO thresholds (256-byte units; size varies) */
	int data_xon_thr = rx_xon_thresh_bytes >> 8;
	int data_xoff_thr = rx_xoff_thresh_bytes >> 8;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	if (falcon_rev(efx) <= FALCON_REV_A1) {
		/* Data FIFO size is 5.5K */
		if (data_xon_thr < 0)
			data_xon_thr = 512 >> 8;
		if (data_xoff_thr < 0)
			data_xoff_thr = 2048 >> 8;
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
				    huge_buf_size);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
	} else {
		/* Data FIFO size is 80K; register fields moved */
		if (data_xon_thr < 0)
			data_xon_thr = 27648 >> 8; /* ~3*max MTU */
		if (data_xoff_thr < 0)
			data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
				    huge_buf_size);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
	}
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}

/* This call performs hardware-specific global initialisation, such as
 * defining the descriptor cache sizes and number of RSS channels.
 * It does not set up any buffers, descriptor rings or event queues.
 */
int falcon_init_nic(struct efx_nic *efx)
{
	efx_oword_t temp;
	int rc;

	/* Use on-chip SRAM */
	efx_reado(efx, &temp, FR_AB_NIC_STAT);
	EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
	efx_writeo(efx, &temp, FR_AB_NIC_STAT);

	/* Set the source of the GMAC clock */
	if (falcon_rev(efx) == FALCON_REV_B0) {
		efx_reado(efx, &temp, FR_AB_GPIO_CTL);
		EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true);
		efx_writeo(efx, &temp, FR_AB_GPIO_CTL);
	}

	rc = falcon_reset_sram(efx);
	if (rc)
		return rc;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Clear the parity enables on the TX data fifos as
	 * they produce false parity errors because of timing issues
	 */
	if (EFX_WORKAROUND_5129(efx)) {
		efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
		EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
		efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
	}

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	if (EFX_WORKAROUND_7244(efx)) {
		efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
		efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
	}

	falcon_setup_rss_indir_table(efx);

	/* XXX This is documented only for Falcon A0/A1 */
	/* Setup RX.  Wait for descriptor is broken and must
	 * be disabled.  RXDP recovery shouldn't be needed, but is.
	 */
	efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
	if (EFX_WORKAROUND_5583(efx))
		EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
	efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Squash TX of packets of 16 bytes or less */
	if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
	 * descriptors (which is bad).
	 */
	efx_reado(efx, &temp, FR_AZ_TX_CFG);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
	efx_writeo(efx, &temp, FR_AZ_TX_CFG);

	falcon_init_rx_cfg(efx);

	/* Set destination of both TX and RX Flush events */
	if (falcon_rev(efx) >= FALCON_REV_B0) {
		EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
		efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
	}

	return 0;
}

void falcon_remove_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	falcon_board(efx)->fini(efx);

	/* Remove I2C adapter and clear it in preparation for a retry */
	rc = i2c_del_adapter(&efx->i2c_adap);
	BUG_ON(rc);
	memset(&efx->i2c_adap, 0, sizeof(efx->i2c_adap));

	falcon_remove_spi_devices(efx);
	falcon_free_buffer(efx, &efx->irq_status);

	falcon_reset_hw(efx, RESET_TYPE_ALL);

	/* Release the second function after the reset */
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}

	/* Tear down the private nic state */
	kfree(efx->nic_data);
	efx->nic_data = NULL;
}

void falcon_update_nic_stats(struct efx_nic *efx)
{
	efx_oword_t cnt;

	efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
	efx->n_rx_nodesc_drop_cnt +=
		EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
}

/**************************************************************************
 *
 * Revision-dependent attributes used by efx.c
 *
 **************************************************************************
 */

struct efx_nic_type falcon_a_nic_type = {
	.mem_map_size = 0x20000,
	.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
	.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
	.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
	.evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
	.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_buffer_padding = 0x24,
	.max_interrupt_mode = EFX_INT_MODE_MSI,
	.phys_addr_channels = 4,
};

struct efx_nic_type falcon_b_nic_type = {
	/* Map everything up to and including the RSS indirection
	 * table.  Don't map MSI-X table, MSI-X PBA since Linux
	 * requires that they not be mapped.  */
	.mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
			 FR_BZ_RX_INDIRECTION_TBL_STEP *
			 FR_BZ_RX_INDIRECTION_TBL_ROWS),
	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_buffer_padding = 0,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
				   * interrupt handler only supports 32
				   * channels */
};