/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

/* Common definitions for all Efx net driver code */

#ifndef EFX_NET_DRIVER_H
#define EFX_NET_DRIVER_H

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/timer.h>
#include <linux/mdio.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/i2c.h>

#include "enum.h"
#include "bitfield.h"

/**************************************************************************
 *
 * Build definitions
 *
 **************************************************************************/

#define EFX_DRIVER_VERSION	"3.1"

#ifdef DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
#else
#define EFX_BUG_ON_PARANOID(x) do {} while (0)
#define EFX_WARN_ON_PARANOID(x) do {} while (0)
#endif

/**************************************************************************
 *
 * Efx data structures
 *
 **************************************************************************/

#define EFX_MAX_CHANNELS 32U
#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
#define EFX_EXTRA_CHANNEL_IOV	0
#define EFX_MAX_EXTRA_CHANNELS	1U

/* Checksum generation is a per-queue option in hardware, so each
 * queue visible to the networking core is backed by two hardware TX
 * queues. */
#define EFX_MAX_TX_TC		2
#define EFX_MAX_CORE_TX_QUEUES	(EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
#define EFX_TXQ_TYPE_OFFLOAD	1	/* flag */
#define EFX_TXQ_TYPE_HIGHPRI	2	/* flag */
#define EFX_TXQ_TYPES		4
#define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
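
/*
 * Illustrative sketch (an assumption for clarity, not a definition the
 * driver relies on): each channel owns EFX_TXQ_TYPES hardware TX
 * queues, distinguished by OR-ing the type flags above into the queue
 * number.  For channel 3 this gives a base of 3 * EFX_TXQ_TYPES = 12,
 * so the checksum-offloading queue is 12 | EFX_TXQ_TYPE_OFFLOAD = 13
 * and the high-priority offloading queue is
 * 12 | EFX_TXQ_TYPE_OFFLOAD | EFX_TXQ_TYPE_HIGHPRI = 15.
 */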

/**
 * struct efx_special_buffer - An Efx special buffer
 * @addr: CPU base address of the buffer
 * @dma_addr: DMA base address of the buffer
 * @len: Buffer length, in bytes
 * @index: Buffer index within controller's buffer table
 * @entries: Number of buffer table entries
 *
 * Special buffers are used for the event queues and the TX and RX
 * descriptor queues for each channel.  They are *not* used for the
 * actual transmit and receive buffers.
 */
struct efx_special_buffer {
	void *addr;
	dma_addr_t dma_addr;
	unsigned int len;
	unsigned int index;
	unsigned int entries;
};

/**
 * struct efx_tx_buffer - An Efx TX buffer
 * @skb: The associated socket buffer.
 *	Set only on the final fragment of a packet; %NULL for all other
 *	fragments.  When this fragment completes, then we can free this
 *	skb.
 * @tsoh: The associated TSO header structure, or %NULL if this
 *	buffer is not a TSO header.
 * @dma_addr: DMA address of the fragment.
 * @len: Length of this fragment.
 *	This field is zero when the queue slot is empty.
 * @continuation: True if this fragment is not the end of a packet.
 * @unmap_single: True if pci_unmap_single should be used.
 * @unmap_len: Length of this fragment to unmap
 */
struct efx_tx_buffer {
	const struct sk_buff *skb;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	unsigned short len;
	bool continuation;
	bool unmap_single;
	unsigned short unmap_len;
};

/**
 * struct efx_tx_queue - An Efx TX queue
 *
 * This is a ring buffer of TX fragments.
 * Since the TX completion path always executes on the same
 * CPU and the xmit path can operate on different CPUs,
 * performance is increased by ensuring that the completion
 * path and the xmit path operate on different cache lines.
 * This is particularly important if the xmit path is always
 * executing on one CPU which is different from the completion
 * path.  There is also a cache line for members which are
 * read but not written on the fast path.
 *
 * @efx: The associated Efx NIC
 * @queue: DMA queue number
 * @channel: The associated channel
 * @core_txq: The networking core TX queue structure
 * @buffer: The software buffer ring
 * @txd: The hardware descriptor ring
 * @ptr_mask: The size of the ring minus 1.
 * @initialised: Has hardware queue been initialised?
 * @read_count: Current read pointer.
 *	This is the number of buffers that have been removed from both rings.
 * @old_write_count: The value of @write_count when last checked.
 *	This is here for performance reasons.  The xmit path will
 *	only get the up-to-date value of @write_count if this
 *	variable indicates that the queue is empty.  This is to
 *	avoid cache-line ping-pong between the xmit path and the
 *	completion path.
 * @insert_count: Current insert pointer
 *	This is the number of buffers that have been added to the
 *	software ring.
 * @write_count: Current write pointer
 *	This is the number of buffers that have been added to the
 *	hardware ring.
 * @old_read_count: The value of read_count when last checked.
 *	This is here for performance reasons.  The xmit path will
 *	only get the up-to-date value of read_count if this
 *	variable indicates that the queue is full.  This is to
 *	avoid cache-line ping-pong between the xmit path and the
 *	completion path.
 * @tso_headers_free: A list of TSO headers allocated for this TX queue
 *	that are not in use, and so available for new TSO sends. The list
 *	is protected by the TX queue lock.
 * @tso_bursts: Number of times TSO xmit invoked by kernel
 * @tso_long_headers: Number of packets with headers too long for standard
 *	blocks
 * @tso_packets: Number of packets via the TSO xmit path
 * @pushes: Number of times the TX push feature has been used
 * @empty_read_count: If the completion path has seen the queue as empty
 *	and the transmission path has not yet checked this, the value of
 *	@read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
 */
struct efx_tx_queue {
	/* Members which don't change on the fast path */
	struct efx_nic *efx ____cacheline_aligned_in_smp;
	unsigned queue;
	struct efx_channel *channel;
	struct netdev_queue *core_txq;
	struct efx_tx_buffer *buffer;
	struct efx_special_buffer txd;
	unsigned int ptr_mask;
	bool initialised;

	/* Members used mainly on the completion path */
	unsigned int read_count ____cacheline_aligned_in_smp;
	unsigned int old_write_count;

	/* Members used only on the xmit path */
	unsigned int insert_count ____cacheline_aligned_in_smp;
	unsigned int write_count;
	unsigned int old_read_count;
	struct efx_tso_header *tso_headers_free;
	unsigned int tso_bursts;
	unsigned int tso_long_headers;
	unsigned int tso_packets;
	unsigned int pushes;

	/* Members shared between paths and sometimes updated */
	unsigned int empty_read_count ____cacheline_aligned_in_smp;
#define EFX_EMPTY_COUNT_VALID 0x80000000
};
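
/*
 * A minimal sketch of how the counters above are typically combined
 * (an illustration, not code taken from the TX path): the counters
 * only ever increase and are masked with @ptr_mask when used as ring
 * indices, so the xmit path can estimate the fill level cheaply as
 *
 *	unsigned int fill_level = tx_queue->insert_count -
 *				  tx_queue->old_read_count;
 *
 * and only re-read @read_count (updating @old_read_count) when this
 * suggests the ring is full, avoiding cache-line ping-pong with the
 * completion path.
 */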

/**
 * struct efx_rx_buffer - An Efx RX data buffer
 * @dma_addr: DMA base address of the buffer
 * @skb: The associated socket buffer. Valid iff !(@flags & %EFX_RX_BUF_PAGE).
 *	Will be %NULL if the buffer slot is currently free.
 * @page: The associated page buffer. Valid iff @flags & %EFX_RX_BUF_PAGE.
 *	Will be %NULL if the buffer slot is currently free.
 * @len: Buffer length, in bytes.
 * @flags: Flags for buffer and packet state.
 */
struct efx_rx_buffer {
	dma_addr_t dma_addr;
	union {
		struct sk_buff *skb;
		struct page *page;
	} u;
	unsigned int len;
	u16 flags;
};
#define EFX_RX_BUF_PAGE		0x0001
#define EFX_RX_PKT_CSUMMED	0x0002
#define EFX_RX_PKT_DISCARD	0x0004
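
/*
 * Illustrative sketch (a hypothetical helper, not part of the driver):
 * @flags selects which member of the union above is valid, so callers
 * pick between u.page and u.skb like this:
 *
 *	static inline struct sk_buff *efx_rx_buf_skb(struct efx_rx_buffer *buf)
 *	{
 *		EFX_BUG_ON_PARANOID(buf->flags & EFX_RX_BUF_PAGE);
 *		return buf->u.skb;
 *	}
 */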
/**
 * struct efx_rx_page_state - Page-based rx buffer state
 *
 * Inserted at the start of every page allocated for receive buffers.
 * Used to facilitate sharing dma mappings between recycled rx buffers
 * and those passed up to the kernel.
 *
 * @refcnt: Number of struct efx_rx_buffer's referencing this page.
 *	When refcnt falls to zero, the page is unmapped for dma
 * @dma_addr: The dma address of this page.
 */
struct efx_rx_page_state {
	unsigned refcnt;
	dma_addr_t dma_addr;

	unsigned int __pad[0] ____cacheline_aligned;
};

/**
 * struct efx_rx_queue - An Efx RX queue
 * @efx: The associated Efx NIC
 * @buffer: The software buffer ring
 * @rxd: The hardware descriptor ring
 * @ptr_mask: The size of the ring minus 1.
 * @enabled: Receive queue enabled indicator.
 * @flush_pending: Set when a RX flush is pending. Has the same lifetime as
 *	@rxq_flush_pending.
 * @added_count: Number of buffers added to the receive queue.
 * @notified_count: Number of buffers given to NIC (<= @added_count).
 * @removed_count: Number of buffers removed from the receive queue.
 * @max_fill: RX descriptor maximum fill level (<= ring size)
 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
 *	(<= @max_fill)
 * @min_fill: RX descriptor minimum non-zero fill level.
 *	This records the minimum fill level observed when a ring
 *	refill was triggered.
 * @alloc_page_count: RX allocation strategy counter.
 * @alloc_skb_count: RX allocation strategy counter.
 * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
 */
struct efx_rx_queue {
	struct efx_nic *efx;
	struct efx_rx_buffer *buffer;
	struct efx_special_buffer rxd;
	unsigned int ptr_mask;
	bool enabled;
	bool flush_pending;

	int added_count;
	int notified_count;
	int removed_count;
	unsigned int max_fill;
	unsigned int fast_fill_trigger;
	unsigned int min_fill;
	unsigned int min_overfill;
	unsigned int alloc_page_count;
	unsigned int alloc_skb_count;
	struct timer_list slow_fill;
	unsigned int slow_fill_count;
};

/**
 * struct efx_buffer - An Efx general-purpose buffer
 * @addr: host base address of the buffer
 * @dma_addr: DMA base address of the buffer
 * @len: Buffer length, in bytes
 *
 * The NIC uses these buffers for its interrupt status registers and
 * MAC stats dumps.
 */
struct efx_buffer {
	void *addr;
	dma_addr_t dma_addr;
	unsigned int len;
};


enum efx_rx_alloc_method {
	RX_ALLOC_METHOD_AUTO = 0,
	RX_ALLOC_METHOD_SKB = 1,
	RX_ALLOC_METHOD_PAGE = 2,
};

/**
 * struct efx_channel - An Efx channel
 *
 * A channel comprises an event queue, at least one TX queue, at least
 * one RX queue, and an associated tasklet for processing the event
 * queue.
 *
 * @efx: Associated Efx NIC
 * @channel: Channel instance number
 * @type: Channel type definition
 * @enabled: Channel enabled indicator
 * @irq: IRQ number (MSI and MSI-X only)
 * @irq_moderation: IRQ moderation value (in hardware ticks)
 * @napi_dev: Net device used with NAPI
 * @napi_str: NAPI control structure
 * @work_pending: Is work pending via NAPI?
 * @eventq: Event queue buffer
 * @eventq_mask: Event queue pointer mask
 * @eventq_read_ptr: Event queue read pointer
 * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
 * @irq_count: Number of IRQs since last adaptive moderation decision
 * @irq_mod_score: IRQ moderation score
 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
 *	and diagnostic counters
 * @rx_alloc_push_pages: RX allocation method currently in use for pushing
 *	descriptors
 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
 * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
 * @n_rx_mcast_mismatch: Count of unmatched multicast frames
 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
 * @n_rx_overlength: Count of RX_OVERLENGTH errors
 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
 * @rx_queue: RX queue for this channel
 * @tx_queue: TX queues for this channel
 */
struct efx_channel {
	struct efx_nic *efx;
	int channel;
	const struct efx_channel_type *type;
	bool enabled;
	int irq;
	unsigned int irq_moderation;
	struct net_device *napi_dev;
	struct napi_struct napi_str;
	bool work_pending;
	struct efx_special_buffer eventq;
	unsigned int eventq_mask;
	unsigned int eventq_read_ptr;
	int event_test_cpu;

	unsigned int irq_count;
	unsigned int irq_mod_score;
#ifdef CONFIG_RFS_ACCEL
	unsigned int rfs_filters_added;
#endif
	int rx_alloc_level;
	int rx_alloc_push_pages;

	unsigned n_rx_tobe_disc;
	unsigned n_rx_ip_hdr_chksum_err;
	unsigned n_rx_tcp_udp_chksum_err;
	unsigned n_rx_mcast_mismatch;
	unsigned n_rx_frm_trunc;
	unsigned n_rx_overlength;
	unsigned n_skbuff_leaks;

	/* Used to pipeline received packets in order to optimise memory
	 * access with prefetches.
	 */
	struct efx_rx_buffer *rx_pkt;

	struct efx_rx_queue rx_queue;
	struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
};

/**
 * struct efx_channel_type - distinguishes traffic and extra channels
 * @handle_no_channel: Handle failure to allocate an extra channel
 * @pre_probe: Set up extra state prior to initialisation
 * @post_remove: Tear down extra state after finalisation, if allocated.
 *	May be called on channels that have not been probed.
 * @get_name: Generate the channel's name (used for its IRQ handler)
 * @copy: Copy the channel state prior to reallocation.  May be %NULL if
 *	reallocation is not supported.
 * @keep_eventq: Flag for whether event queue should be kept initialised
 *	while the device is stopped
 */
struct efx_channel_type {
	void (*handle_no_channel)(struct efx_nic *);
	int (*pre_probe)(struct efx_channel *);
	void (*get_name)(struct efx_channel *, char *buf, size_t len);
	struct efx_channel *(*copy)(const struct efx_channel *);
	bool keep_eventq;
};

enum efx_led_mode {
	EFX_LED_OFF	= 0,
	EFX_LED_ON	= 1,
	EFX_LED_DEFAULT	= 2
};

#define STRING_TABLE_LOOKUP(val, member) \
	((val) < member ## _max) ? member ## _names[val] : "(invalid)"

extern const char *const efx_loopback_mode_names[];
extern const unsigned int efx_loopback_mode_max;
#define LOOPBACK_MODE(efx) \
	STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)

extern const char *const efx_reset_type_names[];
extern const unsigned int efx_reset_type_max;
#define RESET_TYPE(type) \
	STRING_TABLE_LOOKUP(type, efx_reset_type)

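/*
 * Illustrative use (a sketch; the message text is hypothetical): the
 * lookup macros above turn enum values into printable names, e.g.
 *
 *	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
 *		   RESET_TYPE(RESET_TYPE_ALL));
 */
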
enum efx_int_mode {
	/* Be careful if altering to correct macro below */
	EFX_INT_MODE_MSIX = 0,
	EFX_INT_MODE_MSI = 1,
	EFX_INT_MODE_LEGACY = 2,
	EFX_INT_MODE_MAX	/* Insert any new items before this */
};
#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)

enum nic_state {
	STATE_INIT = 0,
	STATE_RUNNING = 1,
	STATE_FINI = 2,
	STATE_DISABLED = 3,
	STATE_MAX,
};

/*
 * Alignment of page-allocated RX buffers
 *
 * Controls the number of bytes inserted at the start of an RX buffer.
 * This is the equivalent of NET_IP_ALIGN [which controls the alignment
 * of the skb->head for hardware DMA].
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define EFX_PAGE_IP_ALIGN 0
#else
#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
#endif

/*
 * Alignment of the skb->head which wraps a page-allocated RX buffer
 *
 * The skb allocated to wrap an rx_buffer can have this alignment. Since
 * the data is memcpy'd from the rx_buf, it does not need to be equal to
 * EFX_PAGE_IP_ALIGN.
 */
#define EFX_PAGE_SKB_ALIGN 2

/* Forward declaration */
struct efx_nic;

/* Pseudo bit-mask flow control field */
#define EFX_FC_RX	FLOW_CTRL_RX
#define EFX_FC_TX	FLOW_CTRL_TX
#define EFX_FC_AUTO	4

/**
 * struct efx_link_state - Current state of the link
 * @up: Link is up
 * @fd: Link is full-duplex
 * @fc: Actual flow control flags
 * @speed: Link speed (Mbps)
 */
struct efx_link_state {
	bool up;
	bool fd;
	u8 fc;
	unsigned int speed;
};

static inline bool efx_link_state_equal(const struct efx_link_state *left,
					const struct efx_link_state *right)
{
	return left->up == right->up && left->fd == right->fd &&
		left->fc == right->fc && left->speed == right->speed;
}

/**
 * struct efx_phy_operations - Efx PHY operations table
 * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds,
 *	efx->loopback_modes.
493 494 495
 * @init: Initialise PHY
 * @fini: Shut down PHY
 * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
 * @poll: Update @link_state and report whether it changed.
 *	Serialised by the mac_lock.
 * @get_settings: Get ethtool settings. Serialised by the mac_lock.
 * @set_settings: Set ethtool settings. Serialised by the mac_lock.
 * @set_npage_adv: Set abilities advertised in (Extended) Next Page
 *	(only needed where AN bit is set in mmds)
 * @test_alive: Test that PHY is 'alive' (online)
 * @test_name: Get the name of a PHY-specific test/result
 * @run_tests: Run tests and record results as appropriate (offline).
 *	Flags are the ethtool tests flags.
 */
struct efx_phy_operations {
	int (*probe) (struct efx_nic *efx);
	int (*init) (struct efx_nic *efx);
	void (*fini) (struct efx_nic *efx);
	void (*remove) (struct efx_nic *efx);
	int (*reconfigure) (struct efx_nic *efx);
	bool (*poll) (struct efx_nic *efx);
	void (*get_settings) (struct efx_nic *efx,
			      struct ethtool_cmd *ecmd);
	int (*set_settings) (struct efx_nic *efx,
			     struct ethtool_cmd *ecmd);
	void (*set_npage_adv) (struct efx_nic *efx, u32);
	int (*test_alive) (struct efx_nic *efx);
	const char *(*test_name) (struct efx_nic *efx, unsigned int index);
	int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
	int (*get_module_eeprom) (struct efx_nic *efx,
			       struct ethtool_eeprom *ee,
			       u8 *data);
	int (*get_module_info) (struct efx_nic *efx,
				struct ethtool_modinfo *modinfo);
};

/**
 * enum efx_phy_mode - PHY operating mode flags
 * @PHY_MODE_NORMAL: on and should pass traffic
 * @PHY_MODE_TX_DISABLED: on with TX disabled
 * @PHY_MODE_LOW_POWER: set to low power through MDIO
 * @PHY_MODE_OFF: switched off through external control
 * @PHY_MODE_SPECIAL: on but will not pass traffic
 */
enum efx_phy_mode {
	PHY_MODE_NORMAL		= 0,
	PHY_MODE_TX_DISABLED	= 1,
	PHY_MODE_LOW_POWER	= 2,
	PHY_MODE_OFF		= 4,
	PHY_MODE_SPECIAL	= 8,
};

static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
{
	return !!(mode & ~PHY_MODE_TX_DISABLED);
}
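
/*
 * For illustration, derived from the flag values above:
 * efx_phy_mode_disabled() is false for PHY_MODE_NORMAL and
 * PHY_MODE_TX_DISABLED, but true for PHY_MODE_LOW_POWER, PHY_MODE_OFF
 * and PHY_MODE_SPECIAL (alone or combined with PHY_MODE_TX_DISABLED).
 */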

/*
 * Efx extended statistics
 *
 * Not all statistics are provided by all supported MACs.  The purpose
 * of this structure is to contain the raw statistics provided by each
 * MAC.
 */
struct efx_mac_stats {
	u64 tx_bytes;
	u64 tx_good_bytes;
	u64 tx_bad_bytes;
	u64 tx_packets;
	u64 tx_bad;
	u64 tx_pause;
	u64 tx_control;
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
	u64 tx_lt64;
	u64 tx_64;
	u64 tx_65_to_127;
	u64 tx_128_to_255;
	u64 tx_256_to_511;
	u64 tx_512_to_1023;
	u64 tx_1024_to_15xx;
	u64 tx_15xx_to_jumbo;
	u64 tx_gtjumbo;
	u64 tx_collision;
	u64 tx_single_collision;
	u64 tx_multiple_collision;
	u64 tx_excessive_collision;
	u64 tx_deferred;
	u64 tx_late_collision;
	u64 tx_excessive_deferred;
	u64 tx_non_tcpudp;
	u64 tx_mac_src_error;
	u64 tx_ip_src_error;
	u64 rx_bytes;
	u64 rx_good_bytes;
	u64 rx_bad_bytes;
	u64 rx_packets;
	u64 rx_good;
	u64 rx_bad;
	u64 rx_pause;
	u64 rx_control;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_lt64;
	u64 rx_64;
	u64 rx_65_to_127;
	u64 rx_128_to_255;
	u64 rx_256_to_511;
	u64 rx_512_to_1023;
	u64 rx_1024_to_15xx;
	u64 rx_15xx_to_jumbo;
	u64 rx_gtjumbo;
	u64 rx_bad_lt64;
	u64 rx_bad_64_to_15xx;
	u64 rx_bad_15xx_to_jumbo;
	u64 rx_bad_gtjumbo;
	u64 rx_overflow;
	u64 rx_missed;
	u64 rx_false_carrier;
	u64 rx_symbol_error;
	u64 rx_align_error;
	u64 rx_length_error;
	u64 rx_internal_error;
	u64 rx_good_lt64;
};

/* Number of bits used in a multicast filter hash address */
#define EFX_MCAST_HASH_BITS 8

/* Number of (single-bit) entries in a multicast filter hash */
#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)

/* An Efx multicast filter hash */
union efx_multicast_hash {
	u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
	efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};

struct efx_filter_state;
struct efx_vf;
struct vfdi_status;
/**
 * struct efx_nic - an Efx NIC
 * @name: Device name (net device name or bus id before net device registered)
 * @pci_dev: The PCI device
 * @type: Controller type attributes
 * @legacy_irq: IRQ number
 * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
 * @workqueue: Workqueue for port reconfigures and the HW monitor.
 *	Work items do not hold and must not acquire RTNL.
 * @workqueue_name: Name of workqueue
 * @reset_work: Scheduled reset workitem
 * @membase_phys: Memory BAR value as physical address
 * @membase: Memory BAR value
 * @interrupt_mode: Interrupt mode
 * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
 * @irq_rx_moderation: IRQ moderation time for RX event queues
 * @msg_enable: Log message enable flags
 * @state: Device state flag. Serialised by the rtnl_lock.
 * @reset_pending: Bitmask for pending resets
 * @tx_queue: TX DMA queues
 * @rx_queue: RX DMA queues
 * @channel: Channels
 * @channel_name: Names for channels and their IRQs
 * @extra_channel_types: Types of extra (non-traffic) channels that
 *	should be allocated for this NIC
 * @rxq_entries: Size of receive queues requested by user.
 * @txq_entries: Size of transmit queues requested by user.
 * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
 * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
 * @sram_lim_qw: Qword address limit of SRAM
 * @next_buffer_table: First available buffer table id
 * @n_channels: Number of channels in use
 * @n_rx_channels: Number of channels used for RX (= number of RX queues)
 * @n_tx_channels: Number of channels used for TX
 * @rx_buffer_len: RX buffer length
 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
 * @rx_hash_key: Toeplitz hash key for RSS
 * @rx_indir_table: Indirection table for RSS
 * @int_error_count: Number of internal errors seen recently
 * @int_error_expire: Time at which error count will be expired
 * @irq_status: Interrupt status buffer
 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
 * @irq_level: IRQ level/index for IRQs not triggered by an event queue
 * @selftest_work: Work item for asynchronous self-test
 * @mtd_list: List of MTDs attached to the NIC
 * @nic_data: Hardware dependent state
 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
 *	efx_monitor() and efx_reconfigure_port()
 * @port_enabled: Port enabled indicator.
 *	Serialises efx_stop_all(), efx_start_all(), efx_monitor() and
 *	efx_mac_work() with kernel interfaces. Safe to read under any
 *	one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
 *	be held to modify it.
 * @port_initialized: Port initialized?
 * @net_dev: Operating system network device. Consider holding the rtnl lock
 * @stats_buffer: DMA buffer for statistics
 * @phy_type: PHY type
 * @phy_op: PHY interface
 * @phy_data: PHY private data (including PHY-specific stats)
 * @mdio: PHY MDIO interface
 * @mdio_bus: PHY MDIO bus ID (only used by Siena)
 * @phy_mode: PHY operating mode. Serialised by @mac_lock.
 * @link_advertising: Autonegotiation advertising flags
 * @link_state: Current state of the link
 * @n_link_state_changes: Number of times the link has changed state
 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
 * @multicast_hash: Multicast hash table
 * @wanted_fc: Wanted flow control flags
 * @fc_disable: When non-zero flow control is disabled. Typically used to
 *	ensure that network back pressure doesn't delay dma queue flushes.
 *	Serialised by the rtnl lock.
 * @mac_work: Work item for changing MAC promiscuity and multicast hash
 * @loopback_mode: Loopback status
 * @loopback_modes: Supported loopback mode bitmask
 * @loopback_selftest: Offline self-test private state
 * @drain_pending: Count of RX and TX queues that haven't been flushed and drained.
 * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
 *	Decremented when the efx_flush_rx_queue() is called.
 * @rxq_flush_outstanding: Count of number of RX flushes started but not yet
 *	completed (either success or failure). Not used when MCDI is used to
 *	flush receive queues.
 * @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions.
 * @vf: Array of &struct efx_vf objects.
 * @vf_count: Number of VFs intended to be enabled.
 * @vf_init_count: Number of VFs that have been fully initialised.
 * @vi_scale: log2 number of vnics per VF.
 * @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
 * @vfdi_status: Common VFDI status page to be dmad to VF address space.
 * @local_addr_list: List of local addresses. Protected by %local_lock.
 * @local_page_list: List of DMA addressable pages used to broadcast
 *	%local_addr_list. Protected by %local_lock.
 * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
 * @peer_work: Work item to broadcast peer addresses to VMs.
 * @monitor_work: Hardware monitor workitem
 * @biu_lock: BIU (bus interface unit) lock
 * @last_irq_cpu: Last CPU to handle a possible test interrupt.  This
 *	field is used by efx_test_interrupts() to verify that an
 *	interrupt has occurred.
 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
 * @mac_stats: MAC statistics. These include all statistics the MACs
 *	can provide.  Generic code converts these into a standard
 *	&struct net_device_stats.
 * @stats_lock: Statistics update lock. Serialises statistics fetches
 *	and access to @mac_stats.
 *
 * This is stored in the private area of the &struct net_device.
 */
struct efx_nic {
	/* The following fields should be written very rarely */

	char name[IFNAMSIZ];
	struct pci_dev *pci_dev;
	const struct efx_nic_type *type;
	int legacy_irq;
	bool legacy_irq_enabled;
	struct workqueue_struct *workqueue;
	char workqueue_name[16];
	struct work_struct reset_work;
	resource_size_t membase_phys;
	void __iomem *membase;

	enum efx_int_mode interrupt_mode;
	unsigned int timer_quantum_ns;
	bool irq_rx_adaptive;
	unsigned int irq_rx_moderation;
	u32 msg_enable;

	enum nic_state state;
	unsigned long reset_pending;

	struct efx_channel *channel[EFX_MAX_CHANNELS];
	char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6];
	const struct efx_channel_type *
	extra_channel_type[EFX_MAX_EXTRA_CHANNELS];

	unsigned rxq_entries;
	unsigned txq_entries;
	unsigned tx_dc_base;
	unsigned rx_dc_base;
	unsigned sram_lim_qw;
	unsigned next_buffer_table;
	unsigned n_channels;
	unsigned n_rx_channels;
	unsigned rss_spread;
	unsigned tx_channel_offset;
	unsigned n_tx_channels;
	unsigned int rx_buffer_len;
	unsigned int rx_buffer_order;
	u8 rx_hash_key[40];
	u32 rx_indir_table[128];

	unsigned int_error_count;
	unsigned long int_error_expire;

	struct efx_buffer irq_status;
	unsigned irq_zero_count;
	unsigned irq_level;
	struct delayed_work selftest_work;

#ifdef CONFIG_SFC_MTD
	struct list_head mtd_list;
#endif

	void *nic_data;

	struct mutex mac_lock;
	struct work_struct mac_work;
	bool port_enabled;

	bool port_initialized;
	struct net_device *net_dev;

	struct efx_buffer stats_buffer;

	unsigned int phy_type;
	const struct efx_phy_operations *phy_op;
	void *phy_data;
	struct mdio_if_info mdio;
	unsigned int mdio_bus;
	enum efx_phy_mode phy_mode;

	u32 link_advertising;
	struct efx_link_state link_state;
	unsigned int n_link_state_changes;

	bool promiscuous;
	union efx_multicast_hash multicast_hash;
	u8 wanted_fc;
	unsigned fc_disable;

	atomic_t rx_reset;
	enum efx_loopback_mode loopback_mode;
	u64 loopback_modes;

	void *loopback_selftest;

	struct efx_filter_state *filter_state;

	atomic_t drain_pending;
	atomic_t rxq_flush_pending;
	atomic_t rxq_flush_outstanding;
	wait_queue_head_t flush_wq;

#ifdef CONFIG_SFC_SRIOV
	struct efx_channel *vfdi_channel;
	struct efx_vf *vf;
	unsigned vf_count;
	unsigned vf_init_count;
	unsigned vi_scale;
	unsigned vf_buftbl_base;
	struct efx_buffer vfdi_status;
	struct list_head local_addr_list;
	struct list_head local_page_list;
	struct mutex local_lock;
	struct work_struct peer_work;
#endif

	/* The following fields may be written more often */

	struct delayed_work monitor_work ____cacheline_aligned_in_smp;
	spinlock_t biu_lock;
	int last_irq_cpu;
	unsigned n_rx_nodesc_drop_cnt;
	struct efx_mac_stats mac_stats;
	spinlock_t stats_lock;
};

static inline int efx_dev_registered(struct efx_nic *efx)
{
	return efx->net_dev->reg_state == NETREG_REGISTERED;
}

static inline unsigned int efx_port_num(struct efx_nic *efx)
{
	return efx->net_dev->dev_id;
}

/**
 * struct efx_nic_type - Efx device type definition
 * @probe: Probe the controller
 * @remove: Free resources allocated by probe()
 * @init: Initialise the controller
 * @dimension_resources: Dimension controller resources (buffer table,
 *	and VIs once the available interrupt resources are clear)
 * @fini: Shut down the controller
 * @monitor: Periodic function for polling link state and hardware monitor
 * @map_reset_reason: Map ethtool reset reason to a reset method
 * @map_reset_flags: Map ethtool reset flags to a reset method, if possible
 * @reset: Reset the controller hardware and possibly the PHY.  This will
 *	be called while the controller is uninitialised.
 * @probe_port: Probe the MAC and PHY
 * @remove_port: Free resources allocated by probe_port()
 * @handle_global_event: Handle a "global" event (may be %NULL)
 * @prepare_flush: Prepare the hardware for flushing the DMA queues
 * @update_stats: Update statistics not provided by event handling
 * @start_stats: Start the regular fetching of statistics
 * @stop_stats: Stop the regular fetching of statistics
 * @set_id_led: Set state of identifying LED or revert to automatic function
 * @push_irq_moderation: Apply interrupt moderation value
 * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
 * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings
 *	to the hardware.  Serialised by the mac_lock.
 * @check_mac_fault: Check MAC fault state. True if fault present.
 * @get_wol: Get WoL configuration from driver state
 * @set_wol: Push WoL configuration to the NIC
 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
 * @test_registers: Test read/write functionality of control registers
 * @test_nvram: Test validity of NVRAM contents
 * @revision: Hardware architecture revision
 * @mem_map_size: Memory BAR mapped size
 * @txd_ptr_tbl_base: TX descriptor ring base address
 * @rxd_ptr_tbl_base: RX descriptor ring base address
 * @buf_tbl_base: Buffer table base address
 * @evq_ptr_tbl_base: Event queue pointer table base address
 * @evq_rptr_tbl_base: Event queue read-pointer table base address
 * @max_dma_mask: Maximum possible DMA mask
 * @rx_buffer_hash_size: Size of hash at start of RX buffer
 * @rx_buffer_padding: Size of padding at end of RX buffer
 * @max_interrupt_mode: Highest capability interrupt mode supported
 *	from &enum efx_init_mode.
 * @phys_addr_channels: Number of channels with physically addressed
 *	descriptors
 * @timer_period_max: Maximum period of interrupt timer (in ticks)
 * @offload_features: net_device feature flags for protocol offload
 *	features implemented in hardware
 */
struct efx_nic_type {
	int (*probe)(struct efx_nic *efx);
	void (*remove)(struct efx_nic *efx);
	int (*init)(struct efx_nic *efx);
	void (*dimension_resources)(struct efx_nic *efx);
	void (*fini)(struct efx_nic *efx);
	void (*monitor)(struct efx_nic *efx);
	enum reset_type (*map_reset_reason)(enum reset_type reason);
	int (*map_reset_flags)(u32 *flags);
	int (*reset)(struct efx_nic *efx, enum reset_type method);
	int (*probe_port)(struct efx_nic *efx);
	void (*remove_port)(struct efx_nic *efx);
	bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
	void (*prepare_flush)(struct efx_nic *efx);
	void (*update_stats)(struct efx_nic *efx);
	void (*start_stats)(struct efx_nic *efx);
	void (*stop_stats)(struct efx_nic *efx);
	void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
	void (*push_irq_moderation)(struct efx_channel *channel);
	int (*reconfigure_port)(struct efx_nic *efx);
	int (*reconfigure_mac)(struct efx_nic *efx);
	bool (*check_mac_fault)(struct efx_nic *efx);
	void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
	int (*set_wol)(struct efx_nic *efx, u32 type);
	void (*resume_wol)(struct efx_nic *efx);
	int (*test_registers)(struct efx_nic *efx);
	int (*test_nvram)(struct efx_nic *efx);

	int revision;
	unsigned int mem_map_size;
	unsigned int txd_ptr_tbl_base;
	unsigned int rxd_ptr_tbl_base;
	unsigned int buf_tbl_base;
	unsigned int evq_ptr_tbl_base;
	unsigned int evq_rptr_tbl_base;
	u64 max_dma_mask;
	unsigned int rx_buffer_hash_size;
	unsigned int rx_buffer_padding;
	unsigned int max_interrupt_mode;
	unsigned int phys_addr_channels;
	unsigned int timer_period_max;
	netdev_features_t offload_features;
};
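
/*
 * Illustrative call pattern (a sketch, not code from this file): the
 * per-type operations are reached through efx->type, and
 * @reconfigure_mac is documented above as serialised by the mac_lock:
 *
 *	mutex_lock(&efx->mac_lock);
 *	efx->type->reconfigure_mac(efx);
 *	mutex_unlock(&efx->mac_lock);
 */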

/**************************************************************************
 *
 * Prototypes and inline functions
 *
 *************************************************************************/

static inline struct efx_channel *
efx_get_channel(struct efx_nic *efx, unsigned index)
{
	EFX_BUG_ON_PARANOID(index >= efx->n_channels);
	return efx->channel[index];
}

/* Iterate over all used channels */
#define efx_for_each_channel(_channel, _efx)				\
	for (_channel = (_efx)->channel[0];				\
	     _channel;							\
	     _channel = (_channel->channel + 1 < (_efx)->n_channels) ?	\
		     (_efx)->channel[_channel->channel + 1] : NULL)

/* Iterate over all used channels in reverse */
#define efx_for_each_channel_rev(_channel, _efx)			\
	for (_channel = (_efx)->channel[(_efx)->n_channels - 1];	\
	     _channel;							\
	     _channel = _channel->channel ?				\
		     (_efx)->channel[_channel->channel - 1] : NULL)
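
/*
 * Illustrative use (a sketch, not code from this driver): visiting
 * every active channel, for example to enable NAPI polling.
 *
 *	struct efx_channel *channel;
 *
 *	efx_for_each_channel(channel, efx)
 *		napi_enable(&channel->napi_str);
 */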

static inline struct efx_tx_queue *
efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
{
	EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
			    type >= EFX_TXQ_TYPES);
	return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
}

static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
		channel->efx->n_tx_channels;
}

static inline struct efx_tx_queue *
efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
{
	EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
			    type >= EFX_TXQ_TYPES);
	return &channel->tx_queue[type];
}

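/*
 * Illustrative use (a sketch, not code from this driver): looking up
 * the checksum-offloading hardware queue behind core TX queue 0.
 *
 *	struct efx_tx_queue *tx_queue =
 *		efx_get_tx_queue(efx, 0, EFX_TXQ_TYPE_OFFLOAD);
 */
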
static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
{
	return !(tx_queue->efx->net_dev->num_tc < 2 &&
		 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
}

/* Iterate over all TX queues belonging to a channel */
#define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
	if (!efx_channel_has_tx_queues(_channel))			\
		;							\
	else								\
		for (_tx_queue = (_channel)->tx_queue;			\
		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
			     efx_tx_queue_used(_tx_queue);		\
		     _tx_queue++)

/* Iterate over all possible TX queues belonging to a channel */
#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel)	\
	if (!efx_channel_has_tx_queues(_channel))			\
		;							\
	else								\
		for (_tx_queue = (_channel)->tx_queue;			\
		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;	\
		     _tx_queue++)

static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
{
	return channel->channel < channel->efx->n_rx_channels;
}

static inline struct efx_rx_queue *
efx_channel_get_rx_queue(struct efx_channel *channel)
{
	EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
	return &channel->rx_queue;
}

/* Iterate over all RX queues belonging to a channel */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel)		\
	if (!efx_channel_has_rx_queue(_channel))			\
		;							\
	else								\
		for (_rx_queue = &(_channel)->rx_queue;			\
		     _rx_queue;						\
		     _rx_queue = NULL)

static inline struct efx_channel *
efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
{
	return container_of(rx_queue, struct efx_channel, rx_queue);
}

static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
{
	return efx_rx_queue_channel(rx_queue)->channel;
}

/* Returns a pointer to the specified receive buffer in the RX
 * descriptor queue.
 */
static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
						  unsigned int index)
{
	return &rx_queue->buffer[index];
}

/* Set bit in a little-endian bitfield */
static inline void set_bit_le(unsigned nr, unsigned char *addr)
{
	addr[nr / 8] |= (1 << (nr % 8));
}

/* Clear bit in a little-endian bitfield */
static inline void clear_bit_le(unsigned nr, unsigned char *addr)
{
	addr[nr / 8] &= ~(1 << (nr % 8));
}
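
/*
 * Illustrative use (a sketch, not code from this driver): setting one
 * entry of the little-endian multicast filter hash defined above,
 * where the bit number is assumed to come from hashing a MAC address.
 *
 *	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
 *
 *	set_bit_le(hash_bit, mc_hash->byte);
 */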


/**
 * EFX_MAX_FRAME_LEN - calculate maximum frame length
 *
 * This calculates the maximum frame length that will be used for a
 * given MTU.  The frame length will be equal to the MTU plus a
 * constant amount of header space and padding.  This is the quantity
 * that the net driver will program into the MAC as the maximum frame
 * length.
 *
 * The 10G MAC requires 8-byte alignment on the frame
 * length, so we round up to the nearest 8.
 *
 * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
 * XGMII cycle).  If the frame length reaches the maximum value in the
 * same cycle, the XMAC can miss the IPG altogether.  We work around
 * this by adding a further 16 bytes.
 */
#define EFX_MAX_FRAME_LEN(mtu) \
	((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16)
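
/*
 * Worked example, assuming ETH_HLEN = 14 and VLAN_HLEN = 4: for an MTU
 * of 1500, 1500 + 14 + 4 + 4 = 1522; rounding up to a multiple of 8
 * gives 1528, and the extra 16 bytes make the programmed maximum frame
 * length 1544 bytes.
 */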


#endif /* EFX_NET_DRIVER_H */