/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

/* Common definitions for all Efx net driver code */

#ifndef EFX_NET_DRIVER_H
#define EFX_NET_DRIVER_H

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/timer.h>
#include <linux/mdio.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/i2c.h>
#include <linux/mtd/mtd.h>

#include "enum.h"
#include "bitfield.h"
#include "filter.h"

/**************************************************************************
 *
 * Build definitions
 *
 **************************************************************************/

#define EFX_DRIVER_VERSION	"3.2"

#ifdef DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
#else
#define EFX_BUG_ON_PARANOID(x) do {} while (0)
#define EFX_WARN_ON_PARANOID(x) do {} while (0)
#endif

/**************************************************************************
 *
 * Efx data structures
 *
 **************************************************************************/

#define EFX_MAX_CHANNELS 32U
#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
#define EFX_EXTRA_CHANNEL_IOV	0
#define EFX_EXTRA_CHANNEL_PTP	1
#define EFX_MAX_EXTRA_CHANNELS	2U

/* Checksum generation is a per-queue option in hardware, so each
 * queue visible to the networking core is backed by two hardware TX
 * queues. */
#define EFX_MAX_TX_TC		2
#define EFX_MAX_CORE_TX_QUEUES	(EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
#define EFX_TXQ_TYPE_OFFLOAD	1	/* flag */
#define EFX_TXQ_TYPE_HIGHPRI	2	/* flag */
#define EFX_TXQ_TYPES		4
#define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
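
/* Illustrative sketch (not part of this driver): with the numbering
 * above, a hardware TX queue index can be decomposed into its channel
 * and type, the low bits carrying the EFX_TXQ_TYPE_* flags:
 *
 *	unsigned int channel = queue / EFX_TXQ_TYPES;
 *	unsigned int type = queue % EFX_TXQ_TYPES;
 */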

/* Maximum possible MTU the driver supports */
#define EFX_MAX_MTU (9 * 1024)

/* Size of an RX scatter buffer.  Small enough to pack 2 into a 4K page,
 * and should be a multiple of the cache line size.
 */
#define EFX_RX_USR_BUF_SIZE	(2048 - 256)

/* If possible, we should ensure cache line alignment at start and end
 * of every buffer.  Otherwise, we just need to ensure 4-byte
 * alignment of the network header.
 */
#if NET_IP_ALIGN == 0
#define EFX_RX_BUF_ALIGNMENT	L1_CACHE_BYTES
#else
#define EFX_RX_BUF_ALIGNMENT	4
#endif

/* Forward declare Precision Time Protocol (PTP) support structure. */
struct efx_ptp_data;

struct efx_self_tests;

/**
 * struct efx_buffer - A general-purpose DMA buffer
 * @addr: host base address of the buffer
 * @dma_addr: DMA base address of the buffer
 * @len: Buffer length, in bytes
 *
 * The NIC uses these buffers for its interrupt status registers and
 * MAC stats dumps.
 */
struct efx_buffer {
	void *addr;
	dma_addr_t dma_addr;
	unsigned int len;
};

/**
 * struct efx_special_buffer - DMA buffer entered into buffer table
 * @buf: Standard &struct efx_buffer
 * @index: Buffer index within controller's buffer table
 * @entries: Number of buffer table entries
 *
 * The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE.
 * Event and descriptor rings are addressed via one or more buffer
 * table entries (and so can be physically non-contiguous, although we
 * currently do not take advantage of that).  On Falcon and Siena we
 * have to take care of allocating and initialising the entries
 * ourselves.  On later hardware this is managed by the firmware and
 * @index and @entries are left as 0.
 */
struct efx_special_buffer {
	struct efx_buffer buf;
	unsigned int index;
	unsigned int entries;
};

/**
 * struct efx_tx_buffer - buffer state for a TX descriptor
 * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
 *	freed when descriptor completes
 * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
 *	freed when descriptor completes.
 * @dma_addr: DMA address of the fragment.
 * @flags: Flags for allocation and DMA mapping type
 * @len: Length of this fragment.
 *	This field is zero when the queue slot is empty.
 * @unmap_len: Length of this fragment to unmap
 */
struct efx_tx_buffer {
	union {
		const struct sk_buff *skb;
		void *heap_buf;
	};
	dma_addr_t dma_addr;
	unsigned short flags;
	unsigned short len;
	unsigned short unmap_len;
};
#define EFX_TX_BUF_CONT		1	/* not last descriptor of packet */
#define EFX_TX_BUF_SKB		2	/* buffer is last part of skb */
#define EFX_TX_BUF_HEAP		4	/* buffer was allocated with kmalloc() */
#define EFX_TX_BUF_MAP_SINGLE	8	/* buffer was mapped with dma_map_single() */

/**
 * struct efx_tx_queue - An Efx TX queue
 *
 * This is a ring buffer of TX fragments.
 * Since the TX completion path always executes on the same
 * CPU and the xmit path can operate on different CPUs,
 * performance is increased by ensuring that the completion
 * path and the xmit path operate on different cache lines.
 * This is particularly important if the xmit path is always
 * executing on one CPU which is different from the completion
 * path.  There is also a cache line for members which are
 * read but not written on the fast path.
 *
 * @efx: The associated Efx NIC
 * @queue: DMA queue number
 * @channel: The associated channel
 * @core_txq: The networking core TX queue structure
 * @buffer: The software buffer ring
 * @tsoh_page: Array of pages of TSO header buffers
 * @txd: The hardware descriptor ring
 * @ptr_mask: The size of the ring minus 1.
 * @initialised: Has hardware queue been initialised?
 * @read_count: Current read pointer.
 *	This is the number of buffers that have been removed from both rings.
 * @old_write_count: The value of @write_count when last checked.
 *	This is here for performance reasons.  The xmit path will
 *	only get the up-to-date value of @write_count if this
 *	variable indicates that the queue is empty.  This is to
 *	avoid cache-line ping-pong between the xmit path and the
 *	completion path.
 * @insert_count: Current insert pointer
 *	This is the number of buffers that have been added to the
 *	software ring.
 * @write_count: Current write pointer
 *	This is the number of buffers that have been added to the
 *	hardware ring.
 * @old_read_count: The value of read_count when last checked.
 *	This is here for performance reasons.  The xmit path will
 *	only get the up-to-date value of read_count if this
 *	variable indicates that the queue is full.  This is to
 *	avoid cache-line ping-pong between the xmit path and the
 *	completion path.
 * @tso_bursts: Number of times TSO xmit invoked by kernel
 * @tso_long_headers: Number of packets with headers too long for standard
 *	blocks
 * @tso_packets: Number of packets via the TSO xmit path
 * @pushes: Number of times the TX push feature has been used
 * @empty_read_count: If the completion path has seen the queue as empty
 *	and the transmission path has not yet checked this, the value of
 *	@read_count bitwise-ORed with %EFX_EMPTY_COUNT_VALID; otherwise 0.
 */
struct efx_tx_queue {
	/* Members which don't change on the fast path */
	struct efx_nic *efx ____cacheline_aligned_in_smp;
	unsigned queue;
	struct efx_channel *channel;
	struct netdev_queue *core_txq;
	struct efx_tx_buffer *buffer;
	struct efx_buffer *tsoh_page;
	struct efx_special_buffer txd;
	unsigned int ptr_mask;
	bool initialised;

	/* Members used mainly on the completion path */
	unsigned int read_count ____cacheline_aligned_in_smp;
	unsigned int old_write_count;

	/* Members used only on the xmit path */
	unsigned int insert_count ____cacheline_aligned_in_smp;
	unsigned int write_count;
	unsigned int old_read_count;
	unsigned int tso_bursts;
	unsigned int tso_long_headers;
	unsigned int tso_packets;
	unsigned int pushes;

	/* Members shared between paths and sometimes updated */
	unsigned int empty_read_count ____cacheline_aligned_in_smp;
#define EFX_EMPTY_COUNT_VALID 0x80000000
	atomic_t flush_outstanding;
};
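
/* Illustrative sketch (not the driver's exact code): the completion path
 * can publish a queue-empty snapshot as
 *
 *	tx_queue->empty_read_count =
 *		tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
 *
 * so the xmit path only re-reads the shared @read_count cache line when
 * the flag bit indicates the queue was seen empty.
 */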

/**
 * struct efx_rx_buffer - An Efx RX data buffer
 * @dma_addr: DMA base address of the buffer
 * @page: The associated page buffer.
 *	Will be %NULL if the buffer slot is currently free.
 * @page_offset: If pending: offset in @page of DMA base address.
 *	If completed: offset in @page of Ethernet header.
 * @len: If pending: length for DMA descriptor.
 *	If completed: received length, excluding hash prefix.
 * @flags: Flags for buffer and packet state.  These are only set on the
 *	first buffer of a scattered packet.
 */
struct efx_rx_buffer {
	dma_addr_t dma_addr;
	struct page *page;
	u16 page_offset;
	u16 len;
	u16 flags;
};
#define EFX_RX_BUF_LAST_IN_PAGE	0x0001
#define EFX_RX_PKT_CSUMMED	0x0002
#define EFX_RX_PKT_DISCARD	0x0004
#define EFX_RX_PKT_TCP		0x0040

/**
 * struct efx_rx_page_state - Page-based rx buffer state
 *
 * Inserted at the start of every page allocated for receive buffers.
 * Used to facilitate sharing dma mappings between recycled rx buffers
 * and those passed up to the kernel.
 *
 * @refcnt: Number of struct efx_rx_buffer's referencing this page.
 *	When refcnt falls to zero, the page is unmapped for dma
 * @dma_addr: The dma address of this page.
 */
struct efx_rx_page_state {
	unsigned refcnt;
	dma_addr_t dma_addr;

	unsigned int __pad[0] ____cacheline_aligned;
};

/**
 * struct efx_rx_queue - An Efx RX queue
 * @efx: The associated Efx NIC
 * @core_index:  Index of network core RX queue.  Will be >= 0 iff this
 *	is associated with a real RX queue.
 * @buffer: The software buffer ring
 * @rxd: The hardware descriptor ring
 * @ptr_mask: The size of the ring minus 1.
 * @refill_enabled: Enable refill whenever fill level is low
 * @flush_pending: Set when a RX flush is pending. Has the same lifetime as
 *	@rxq_flush_pending.
 * @added_count: Number of buffers added to the receive queue.
 * @notified_count: Number of buffers given to NIC (<= @added_count).
 * @removed_count: Number of buffers removed from the receive queue.
 * @scatter_n: Number of buffers used by current packet
 * @page_ring: The ring to store DMA mapped pages for reuse.
 * @page_add: Counter to calculate the write pointer for the recycle ring.
 * @page_remove: Counter to calculate the read pointer for the recycle ring.
 * @page_recycle_count: The number of pages that have been recycled.
 * @page_recycle_failed: The number of pages that couldn't be recycled because
 *      the kernel still held a reference to them.
 * @page_recycle_full: The number of pages that were released because the
 *      recycle ring was full.
 * @page_ptr_mask: The number of pages in the RX recycle ring minus 1.
 * @max_fill: RX descriptor maximum fill level (<= ring size)
 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
 *	(<= @max_fill)
 * @min_fill: RX descriptor minimum non-zero fill level.
 *	This records the minimum fill level observed when a ring
 *	refill was triggered.
 * @recycle_count: RX buffer recycle counter.
 * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
 */
struct efx_rx_queue {
	struct efx_nic *efx;
	int core_index;
	struct efx_rx_buffer *buffer;
	struct efx_special_buffer rxd;
	unsigned int ptr_mask;
	bool refill_enabled;
	bool flush_pending;

	unsigned int added_count;
	unsigned int notified_count;
	unsigned int removed_count;
	unsigned int scatter_n;
	struct page **page_ring;
	unsigned int page_add;
	unsigned int page_remove;
	unsigned int page_recycle_count;
	unsigned int page_recycle_failed;
	unsigned int page_recycle_full;
	unsigned int page_ptr_mask;
	unsigned int max_fill;
	unsigned int fast_fill_trigger;
	unsigned int min_fill;
	unsigned int min_overfill;
	unsigned int recycle_count;
	struct timer_list slow_fill;
	unsigned int slow_fill_count;
};

enum efx_rx_alloc_method {
	RX_ALLOC_METHOD_AUTO = 0,
	RX_ALLOC_METHOD_SKB = 1,
	RX_ALLOC_METHOD_PAGE = 2,
};

/**
 * struct efx_channel - An Efx channel
 *
 * A channel comprises an event queue, at least one TX queue, at least
 * one RX queue, and an associated tasklet for processing the event
 * queue.
 *
 * @efx: Associated Efx NIC
 * @channel: Channel instance number
 * @type: Channel type definition
 * @eventq_init: Event queue initialised flag
 * @enabled: Channel enabled indicator
 * @irq: IRQ number (MSI and MSI-X only)
 * @irq_moderation: IRQ moderation value (in hardware ticks)
 * @napi_dev: Net device used with NAPI
 * @napi_str: NAPI control structure
 * @eventq: Event queue buffer
 * @eventq_mask: Event queue pointer mask
 * @eventq_read_ptr: Event queue read pointer
 * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
 * @irq_count: Number of IRQs since last adaptive moderation decision
 * @irq_mod_score: IRQ moderation score
 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
 * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
 * @n_rx_mcast_mismatch: Count of unmatched multicast frames
 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
 * @n_rx_overlength: Count of RX_OVERLENGTH errors
 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
 * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
 *	lack of descriptors
 * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
 *	__efx_rx_packet(), or zero if there is none
 * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
 *	by __efx_rx_packet(), if @rx_pkt_n_frags != 0
 * @rx_queue: RX queue for this channel
 * @tx_queue: TX queues for this channel
 */
struct efx_channel {
	struct efx_nic *efx;
	int channel;
	const struct efx_channel_type *type;
	bool eventq_init;
	bool enabled;
	int irq;
	unsigned int irq_moderation;
	struct net_device *napi_dev;
	struct napi_struct napi_str;
	struct efx_special_buffer eventq;
	unsigned int eventq_mask;
	unsigned int eventq_read_ptr;
	int event_test_cpu;

	unsigned int irq_count;
	unsigned int irq_mod_score;
#ifdef CONFIG_RFS_ACCEL
	unsigned int rfs_filters_added;
#endif

	unsigned n_rx_tobe_disc;
	unsigned n_rx_ip_hdr_chksum_err;
	unsigned n_rx_tcp_udp_chksum_err;
	unsigned n_rx_mcast_mismatch;
	unsigned n_rx_frm_trunc;
	unsigned n_rx_overlength;
	unsigned n_skbuff_leaks;
	unsigned int n_rx_nodesc_trunc;

	unsigned int rx_pkt_n_frags;
	unsigned int rx_pkt_index;

	struct efx_rx_queue rx_queue;
	struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
};

/**
 * struct efx_msi_context - Context for each MSI
 * @efx: The associated NIC
 * @index: Index of the channel/IRQ
 * @name: Name of the channel/IRQ
 *
 * Unlike &struct efx_channel, this is never reallocated and is always
 * safe for the IRQ handler to access.
 */
struct efx_msi_context {
	struct efx_nic *efx;
	unsigned int index;
	char name[IFNAMSIZ + 6];
};

/**
 * struct efx_channel_type - distinguishes traffic and extra channels
 * @handle_no_channel: Handle failure to allocate an extra channel
 * @pre_probe: Set up extra state prior to initialisation
 * @post_remove: Tear down extra state after finalisation, if allocated.
 *	May be called on channels that have not been probed.
 * @get_name: Generate the channel's name (used for its IRQ handler)
 * @copy: Copy the channel state prior to reallocation.  May be %NULL if
 *	reallocation is not supported.
 * @receive_skb: Handle an skb ready to be passed to netif_receive_skb()
 * @keep_eventq: Flag for whether event queue should be kept initialised
 *	while the device is stopped
 */
struct efx_channel_type {
	void (*handle_no_channel)(struct efx_nic *);
	int (*pre_probe)(struct efx_channel *);
	void (*post_remove)(struct efx_channel *);
	void (*get_name)(struct efx_channel *, char *buf, size_t len);
	struct efx_channel *(*copy)(const struct efx_channel *);
	bool (*receive_skb)(struct efx_channel *, struct sk_buff *);
	bool keep_eventq;
};

enum efx_led_mode {
	EFX_LED_OFF	= 0,
	EFX_LED_ON	= 1,
	EFX_LED_DEFAULT	= 2
};

#define STRING_TABLE_LOOKUP(val, member) \
	((val) < member ## _max) ? member ## _names[val] : "(invalid)"

extern const char *const efx_loopback_mode_names[];
extern const unsigned int efx_loopback_mode_max;
#define LOOPBACK_MODE(efx) \
	STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)

extern const char *const efx_reset_type_names[];
extern const unsigned int efx_reset_type_max;
#define RESET_TYPE(type) \
	STRING_TABLE_LOOKUP(type, efx_reset_type)
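
/* Illustrative usage (not from this file): the lookup macros map enum
 * values to printable names for log messages, e.g.
 *
 *	netif_err(efx, drv, efx->net_dev, "scheduling %s reset\n",
 *		  RESET_TYPE(method));
 *
 * where method is an enum reset_type value.
 */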

enum efx_int_mode {
	/* Be careful if altering to correct macro below */
	EFX_INT_MODE_MSIX = 0,
	EFX_INT_MODE_MSI = 1,
	EFX_INT_MODE_LEGACY = 2,
	EFX_INT_MODE_MAX	/* Insert any new items before this */
};
#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)

enum nic_state {
	STATE_UNINIT = 0,	/* device being probed/removed or is frozen */
	STATE_READY = 1,	/* hardware ready and netdev registered */
	STATE_DISABLED = 2,	/* device disabled due to hardware errors */
	STATE_RECOVERY = 3,	/* device recovering from PCI error */
};

/*
 * Alignment of the skb->head which wraps a page-allocated RX buffer
 *
 * The skb allocated to wrap an rx_buffer can have this alignment. Since
 * the data is memcpy'd from the rx_buf, it does not need to be equal to
 * NET_IP_ALIGN.
 */
#define EFX_PAGE_SKB_ALIGN 2

/* Forward declaration */
struct efx_nic;

/* Pseudo bit-mask flow control field */
#define EFX_FC_RX	FLOW_CTRL_RX
#define EFX_FC_TX	FLOW_CTRL_TX
#define EFX_FC_AUTO	4

/**
 * struct efx_link_state - Current state of the link
 * @up: Link is up
 * @fd: Link is full-duplex
 * @fc: Actual flow control flags
 * @speed: Link speed (Mbps)
 */
struct efx_link_state {
	bool up;
	bool fd;
	u8 fc;
	unsigned int speed;
};

static inline bool efx_link_state_equal(const struct efx_link_state *left,
					const struct efx_link_state *right)
{
	return left->up == right->up && left->fd == right->fd &&
		left->fc == right->fc && left->speed == right->speed;
}
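
/* Illustrative usage (not from this file): a PHY poll implementation can
 * snapshot the old state, refresh efx->link_state from hardware (the
 * helper below is hypothetical), and report whether anything changed:
 *
 *	struct efx_link_state old_state = efx->link_state;
 *	my_phy_poll_hw(efx);
 *	return !efx_link_state_equal(&old_state, &efx->link_state);
 */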

/**
 * struct efx_phy_operations - Efx PHY operations table
 * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds,
 *	efx->loopback_modes.
 * @init: Initialise PHY
 * @fini: Shut down PHY
 * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
 * @poll: Update @link_state and report whether it changed.
 *	Serialised by the mac_lock.
 * @get_settings: Get ethtool settings. Serialised by the mac_lock.
 * @set_settings: Set ethtool settings. Serialised by the mac_lock.
 * @set_npage_adv: Set abilities advertised in (Extended) Next Page
 *	(only needed where AN bit is set in mmds)
 * @test_alive: Test that PHY is 'alive' (online)
 * @test_name: Get the name of a PHY-specific test/result
 * @run_tests: Run tests and record results as appropriate (offline).
 *	Flags are the ethtool tests flags.
 */
struct efx_phy_operations {
	int (*probe) (struct efx_nic *efx);
	int (*init) (struct efx_nic *efx);
	void (*fini) (struct efx_nic *efx);
	void (*remove) (struct efx_nic *efx);
	int (*reconfigure) (struct efx_nic *efx);
	bool (*poll) (struct efx_nic *efx);
	void (*get_settings) (struct efx_nic *efx,
			      struct ethtool_cmd *ecmd);
	int (*set_settings) (struct efx_nic *efx,
			     struct ethtool_cmd *ecmd);
	void (*set_npage_adv) (struct efx_nic *efx, u32);
	int (*test_alive) (struct efx_nic *efx);
	const char *(*test_name) (struct efx_nic *efx, unsigned int index);
	int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
	int (*get_module_eeprom) (struct efx_nic *efx,
			       struct ethtool_eeprom *ee,
			       u8 *data);
	int (*get_module_info) (struct efx_nic *efx,
				struct ethtool_modinfo *modinfo);
};

/**
 * enum efx_phy_mode - PHY operating mode flags
 * @PHY_MODE_NORMAL: on and should pass traffic
 * @PHY_MODE_TX_DISABLED: on with TX disabled
 * @PHY_MODE_LOW_POWER: set to low power through MDIO
 * @PHY_MODE_OFF: switched off through external control
 * @PHY_MODE_SPECIAL: on but will not pass traffic
 */
enum efx_phy_mode {
	PHY_MODE_NORMAL		= 0,
	PHY_MODE_TX_DISABLED	= 1,
	PHY_MODE_LOW_POWER	= 2,
	PHY_MODE_OFF		= 4,
	PHY_MODE_SPECIAL	= 8,
};

static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
{
	return !!(mode & ~PHY_MODE_TX_DISABLED);
}

/*
 * Efx extended statistics
 *
 * Not all statistics are provided by all supported MACs.  The purpose
 * of this structure is to contain the raw statistics provided by each
 * MAC.
 */
struct efx_mac_stats {
	u64 tx_bytes;
	u64 tx_good_bytes;
	u64 tx_bad_bytes;
	u64 tx_packets;
	u64 tx_bad;
	u64 tx_pause;
	u64 tx_control;
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
	u64 tx_lt64;
	u64 tx_64;
	u64 tx_65_to_127;
	u64 tx_128_to_255;
	u64 tx_256_to_511;
	u64 tx_512_to_1023;
	u64 tx_1024_to_15xx;
	u64 tx_15xx_to_jumbo;
	u64 tx_gtjumbo;
	u64 tx_collision;
	u64 tx_single_collision;
	u64 tx_multiple_collision;
	u64 tx_excessive_collision;
	u64 tx_deferred;
	u64 tx_late_collision;
	u64 tx_excessive_deferred;
	u64 tx_non_tcpudp;
	u64 tx_mac_src_error;
	u64 tx_ip_src_error;
	u64 rx_bytes;
	u64 rx_good_bytes;
	u64 rx_bad_bytes;
	u64 rx_packets;
	u64 rx_good;
	u64 rx_bad;
	u64 rx_pause;
	u64 rx_control;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_lt64;
	u64 rx_64;
	u64 rx_65_to_127;
	u64 rx_128_to_255;
	u64 rx_256_to_511;
	u64 rx_512_to_1023;
	u64 rx_1024_to_15xx;
	u64 rx_15xx_to_jumbo;
	u64 rx_gtjumbo;
	u64 rx_bad_lt64;
	u64 rx_bad_64_to_15xx;
	u64 rx_bad_15xx_to_jumbo;
	u64 rx_bad_gtjumbo;
	u64 rx_overflow;
	u64 rx_missed;
	u64 rx_false_carrier;
	u64 rx_symbol_error;
	u64 rx_align_error;
	u64 rx_length_error;
	u64 rx_internal_error;
	u64 rx_good_lt64;
};

/* Number of bits used in a multicast filter hash address */
#define EFX_MCAST_HASH_BITS 8

/* Number of (single-bit) entries in a multicast filter hash */
#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)

/* An Efx multicast filter hash */
union efx_multicast_hash {
	u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
	efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};
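
/* Illustrative sketch (not from this file): a multicast address maps to
 * one bit of the hash, typically taken from the low EFX_MCAST_HASH_BITS
 * of a CRC over the Ethernet address:
 *
 *	u32 crc = ether_crc_le(ETH_ALEN, mc_addr);
 *	__set_bit_le(crc & (EFX_MCAST_HASH_ENTRIES - 1), mc_hash->byte);
 */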

struct efx_vf;
struct vfdi_status;

/**
 * struct efx_nic - an Efx NIC
 * @name: Device name (net device name or bus id before net device registered)
 * @pci_dev: The PCI device
 * @type: Controller type attributes
 * @legacy_irq: IRQ number
 * @workqueue: Workqueue for port reconfigures and the HW monitor.
 *	Work items do not hold and must not acquire RTNL.
 * @workqueue_name: Name of workqueue
 * @reset_work: Scheduled reset workitem
 * @membase_phys: Memory BAR value as physical address
 * @membase: Memory BAR value
 * @interrupt_mode: Interrupt mode
 * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
 * @irq_rx_moderation: IRQ moderation time for RX event queues
 * @msg_enable: Log message enable flags
 * @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
 * @reset_pending: Bitmask for pending resets
 * @tx_queue: TX DMA queues
 * @rx_queue: RX DMA queues
 * @channel: Channels
 * @msi_context: Context for each MSI
 * @extra_channel_types: Types of extra (non-traffic) channels that
 *	should be allocated for this NIC
 * @rxq_entries: Size of receive queues requested by user.
 * @txq_entries: Size of transmit queues requested by user.
 * @txq_stop_thresh: TX queue fill level at or above which we stop it.
 * @txq_wake_thresh: TX queue fill level at or below which we wake it.
 * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
 * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
 * @sram_lim_qw: Qword address limit of SRAM
 * @next_buffer_table: First available buffer table id
 * @n_channels: Number of channels in use
 * @n_rx_channels: Number of channels used for RX (= number of RX queues)
 * @n_tx_channels: Number of channels used for TX
 * @rx_dma_len: Current maximum RX DMA length
 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
 * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
 *	for use in sk_buff::truesize
 * @rx_hash_key: Toeplitz hash key for RSS
 * @rx_indir_table: Indirection table for RSS
 * @rx_scatter: Scatter mode enabled for receives
 * @int_error_count: Number of internal errors seen recently
 * @int_error_expire: Time at which error count will be expired
 * @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will
 *	acknowledge but do nothing else.
 * @irq_status: Interrupt status buffer
 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
 * @irq_level: IRQ level/index for IRQs not triggered by an event queue
 * @selftest_work: Work item for asynchronous self-test
 * @mtd_list: List of MTDs attached to the NIC
 * @nic_data: Hardware dependent state
 * @mcdi: Management-Controller-to-Driver Interface state
 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
 *	efx_monitor() and efx_reconfigure_port()
 * @port_enabled: Port enabled indicator.
 *	Serialises efx_stop_all(), efx_start_all(), efx_monitor() and
 *	efx_mac_work() with kernel interfaces. Safe to read under any
 *	one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
 *	be held to modify it.
 * @port_initialized: Port initialized?
 * @net_dev: Operating system network device. Consider holding the rtnl lock
 * @stats_buffer: DMA buffer for statistics
 * @phy_type: PHY type
 * @phy_op: PHY interface
 * @phy_data: PHY private data (including PHY-specific stats)
 * @mdio: PHY MDIO interface
 * @mdio_bus: PHY MDIO bus ID (only used by Siena)
 * @phy_mode: PHY operating mode. Serialised by @mac_lock.
 * @link_advertising: Autonegotiation advertising flags
 * @link_state: Current state of the link
 * @n_link_state_changes: Number of times the link has changed state
 * @unicast_filter: Flag for Falcon-arch simple unicast filter.
 *	Protected by @mac_lock.
 * @multicast_hash: Multicast hash table for Falcon-arch.
 *	Protected by @mac_lock.
 * @wanted_fc: Wanted flow control flags
 * @fc_disable: When non-zero flow control is disabled. Typically used to
 *	ensure that network back pressure doesn't delay dma queue flushes.
 *	Serialised by the rtnl lock.
 * @mac_work: Work item for changing MAC promiscuity and multicast hash
 * @loopback_mode: Loopback status
 * @loopback_modes: Supported loopback mode bitmask
 * @loopback_selftest: Offline self-test private state
 * @filter_lock: Filter table lock
 * @filter_state: Architecture-dependent filter table state
 * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
 *	indexed by filter ID
 * @rps_expire_index: Next index to check for expiry in @rps_flow_id
 * @drain_pending: Count of RX and TX queues that haven't been flushed and drained.
 * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
 *	Decremented when efx_flush_rx_queue() is called.
 * @rxq_flush_outstanding: Count of number of RX flushes started but not yet
 *	completed (either success or failure). Not used when MCDI is used to
 *	flush receive queues.
 * @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions.
 * @vf: Array of &struct efx_vf objects.
 * @vf_count: Number of VFs intended to be enabled.
 * @vf_init_count: Number of VFs that have been fully initialised.
 * @vi_scale: log2 number of vnics per VF.
 * @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
 * @vfdi_status: Common VFDI status page to be DMAed to VF address space.
 * @local_addr_list: List of local addresses. Protected by %local_lock.
 * @local_page_list: List of DMA addressable pages used to broadcast
 *	%local_addr_list. Protected by %local_lock.
 * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
 * @peer_work: Work item to broadcast peer addresses to VMs.
 * @ptp_data: PTP state data
 * @monitor_work: Hardware monitor workitem
 * @biu_lock: BIU (bus interface unit) lock
 * @last_irq_cpu: Last CPU to handle a possible test interrupt.  This
 *	field is used by efx_test_interrupts() to verify that an
 *	interrupt has occurred.
 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
 * @mac_stats: MAC statistics. These include all statistics the MACs
 *	can provide.  Generic code converts these into a standard
 *	&struct net_device_stats.
 * @stats_lock: Statistics update lock. Serialises statistics fetches
 *	and access to @mac_stats.
 *
 * This is stored in the private area of the &struct net_device.
 */
struct efx_nic {
	/* The following fields should be written very rarely */

	char name[IFNAMSIZ];
	struct pci_dev *pci_dev;
	unsigned int port_num;
	const struct efx_nic_type *type;
	int legacy_irq;
	bool eeh_disabled_legacy_irq;
	struct workqueue_struct *workqueue;
	char workqueue_name[16];
	struct work_struct reset_work;
	resource_size_t membase_phys;
	void __iomem *membase;

	enum efx_int_mode interrupt_mode;
	unsigned int timer_quantum_ns;
	bool irq_rx_adaptive;
	unsigned int irq_rx_moderation;
	u32 msg_enable;

	enum nic_state state;
	unsigned long reset_pending;

	struct efx_channel *channel[EFX_MAX_CHANNELS];
	struct efx_msi_context msi_context[EFX_MAX_CHANNELS];
	const struct efx_channel_type *
	extra_channel_type[EFX_MAX_EXTRA_CHANNELS];

	unsigned rxq_entries;
	unsigned txq_entries;
	unsigned int txq_stop_thresh;
	unsigned int txq_wake_thresh;

	unsigned tx_dc_base;
	unsigned rx_dc_base;
	unsigned sram_lim_qw;
	unsigned next_buffer_table;

	unsigned int max_channels;
	unsigned n_channels;
	unsigned n_rx_channels;
	unsigned rss_spread;
	unsigned tx_channel_offset;
	unsigned n_tx_channels;
	unsigned int rx_dma_len;
	unsigned int rx_buffer_order;
	unsigned int rx_buffer_truesize;
	unsigned int rx_page_buf_step;
	unsigned int rx_bufs_per_page;
	unsigned int rx_pages_per_batch;
	u8 rx_hash_key[40];
	u32 rx_indir_table[128];
	bool rx_scatter;

	unsigned int_error_count;
	unsigned long int_error_expire;

	bool irq_soft_enabled;
	struct efx_buffer irq_status;
	unsigned irq_zero_count;
	unsigned irq_level;
	struct delayed_work selftest_work;

#ifdef CONFIG_SFC_MTD
	struct list_head mtd_list;
#endif

	void *nic_data;
	struct efx_mcdi_data *mcdi;

	struct mutex mac_lock;
	struct work_struct mac_work;
	bool port_enabled;

	bool port_initialized;
	struct net_device *net_dev;

	struct efx_buffer stats_buffer;

	unsigned int phy_type;
	const struct efx_phy_operations *phy_op;
	void *phy_data;
	struct mdio_if_info mdio;
	unsigned int mdio_bus;
	enum efx_phy_mode phy_mode;

	u32 link_advertising;
	struct efx_link_state link_state;
	unsigned int n_link_state_changes;

	bool unicast_filter;
	union efx_multicast_hash multicast_hash;
	u8 wanted_fc;
	unsigned fc_disable;

	atomic_t rx_reset;
	enum efx_loopback_mode loopback_mode;
	u64 loopback_modes;

	void *loopback_selftest;

	spinlock_t filter_lock;
	void *filter_state;
#ifdef CONFIG_RFS_ACCEL
	u32 *rps_flow_id;
	unsigned int rps_expire_index;
#endif

	atomic_t drain_pending;
	atomic_t rxq_flush_pending;
	atomic_t rxq_flush_outstanding;
	wait_queue_head_t flush_wq;

#ifdef CONFIG_SFC_SRIOV
	struct efx_channel *vfdi_channel;
	struct efx_vf *vf;
	unsigned vf_count;
	unsigned vf_init_count;
	unsigned vi_scale;
	unsigned vf_buftbl_base;
	struct efx_buffer vfdi_status;
	struct list_head local_addr_list;
	struct list_head local_page_list;
	struct mutex local_lock;
	struct work_struct peer_work;
#endif

	struct efx_ptp_data *ptp_data;

	/* The following fields may be written more often */

	struct delayed_work monitor_work ____cacheline_aligned_in_smp;
	spinlock_t biu_lock;
	int last_irq_cpu;
	unsigned n_rx_nodesc_drop_cnt;
	struct efx_mac_stats mac_stats;
	spinlock_t stats_lock;
};

static inline int efx_dev_registered(struct efx_nic *efx)
{
	return efx->net_dev->reg_state == NETREG_REGISTERED;
}

static inline unsigned int efx_port_num(struct efx_nic *efx)
{
	return efx->port_num;
}

struct efx_mtd_partition {
	struct list_head node;
	struct mtd_info mtd;
	const char *dev_type_name;
	const char *type_name;
	char name[IFNAMSIZ + 20];
};

/**
 * struct efx_nic_type - Efx device type definition
 * @mem_map_size: Get memory BAR mapped size
 * @probe: Probe the controller
 * @remove: Free resources allocated by probe()
 * @init: Initialise the controller
 * @dimension_resources: Dimension controller resources (buffer table,
 *	and VIs once the available interrupt resources are clear)
 * @fini: Shut down the controller
 * @monitor: Periodic function for polling link state and hardware monitor
 * @map_reset_reason: Map ethtool reset reason to a reset method
 * @map_reset_flags: Map ethtool reset flags to a reset method, if possible
 * @reset: Reset the controller hardware and possibly the PHY.  This will
 *	be called while the controller is uninitialised.
 * @probe_port: Probe the MAC and PHY
 * @remove_port: Free resources allocated by probe_port()
 * @handle_global_event: Handle a "global" event (may be %NULL)
 * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues)
 * @prepare_flush: Prepare the hardware for flushing the DMA queues
 *	(for Falcon architecture)
 * @finish_flush: Clean up after flushing the DMA queues (for Falcon
 *	architecture)
 * @update_stats: Update statistics not provided by event handling
 * @start_stats: Start the regular fetching of statistics
 * @stop_stats: Stop the regular fetching of statistics
 * @set_id_led: Set state of identifying LED or revert to automatic function
 * @push_irq_moderation: Apply interrupt moderation value
 * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
 * @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL)
 * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings
 *	to the hardware.  Serialised by the mac_lock.
 * @check_mac_fault: Check MAC fault state. True if fault present.
 * @get_wol: Get WoL configuration from driver state
 * @set_wol: Push WoL configuration to the NIC
 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
 * @test_chip: Test registers.  May use efx_farch_test_registers(), and is
 *	expected to reset the NIC.
 * @test_nvram: Test validity of NVRAM contents
 * @mcdi_request: Send an MCDI request with the given header and SDU.
 *	The SDU length may be any value from 0 up to the protocol-
 *	defined maximum, but its buffer will be padded to a multiple
 *	of 4 bytes.
 * @mcdi_poll_response: Test whether an MCDI response is available.
 * @mcdi_read_response: Read the MCDI response PDU.  The offset will
 *	be a multiple of 4.  The length may not be, but the buffer
 *	will be padded so it is safe to round up.
 * @mcdi_poll_reboot: Test whether the MCDI has rebooted.  If so,
 *	return an appropriate error code for aborting any current
 *	request; otherwise return 0.
 * @irq_enable_master: Enable IRQs on the NIC.  Each event queue must
 *	be separately enabled after this.
 * @irq_test_generate: Generate a test IRQ
 * @irq_disable_non_ev: Disable non-event IRQs on the NIC.  Each event
 *	queue must be separately disabled before this.
 * @irq_handle_msi: Handle MSI for a channel.  The @dev_id argument is
 *	a pointer to the &struct efx_msi_context for the channel.
 * @irq_handle_legacy: Handle legacy interrupt.  The @dev_id argument
 *	is a pointer to the &struct efx_nic.
 * @tx_probe: Allocate resources for TX queue
 * @tx_init: Initialise TX queue on the NIC
 * @tx_remove: Free resources for TX queue
 * @tx_write: Write TX descriptors and doorbell
 * @rx_push_indir_table: Write RSS indirection table to the NIC
 * @rx_probe: Allocate resources for RX queue
 * @rx_init: Initialise RX queue on the NIC
 * @rx_remove: Free resources for RX queue
 * @rx_write: Write RX descriptors and doorbell
 * @rx_defer_refill: Generate a refill reminder event
 * @ev_probe: Allocate resources for event queue
 * @ev_init: Initialise event queue on the NIC
 * @ev_fini: Deinitialise event queue on the NIC
 * @ev_remove: Free resources for event queue
 * @ev_process: Process events for a queue, up to the given NAPI quota
 * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ
 * @ev_test_generate: Generate a test event
 * @filter_table_probe: Probe filter capabilities and set up filter software state
 * @filter_table_restore: Restore filters removed from hardware
 * @filter_table_remove: Remove filters from hardware and tear down software state
 * @filter_update_rx_scatter: Update filters after change to rx scatter setting
 * @filter_insert: add or replace a filter
 * @filter_remove_safe: remove a filter by ID, carefully
 * @filter_get_safe: retrieve a filter by ID, carefully
 * @filter_clear_rx: remove RX filters by priority
 * @filter_count_rx_used: Get the number of filters in use at a given priority
 * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
 * @filter_get_rx_ids: Get list of RX filters at a given priority
 * @filter_rfs_insert: Add or replace a filter for RFS.  This must be
 *	atomic.  The hardware change may be asynchronous but should
 *	not be delayed for long.  It may fail if this can't be done
 *	atomically.
 * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS.
 *	This must check whether the specified table entry is used by RFS
 *	and that rps_may_expire_flow() returns true for it.
 * @mtd_probe: Probe and add MTD partitions associated with this net device,
 *	 using efx_mtd_add()
 * @mtd_rename: Set an MTD partition name using the net device name
 * @mtd_read: Read from an MTD partition
 * @mtd_erase: Erase part of an MTD partition
 * @mtd_write: Write to an MTD partition
 * @mtd_sync: Wait for write-back to complete on MTD partition.  This
 *	also notifies the driver that a writer has finished using this
 *	partition.
 * @revision: Hardware architecture revision
 * @txd_ptr_tbl_base: TX descriptor ring base address
 * @rxd_ptr_tbl_base: RX descriptor ring base address
 * @buf_tbl_base: Buffer table base address
 * @evq_ptr_tbl_base: Event queue pointer table base address
 * @evq_rptr_tbl_base: Event queue read-pointer table base address
 * @max_dma_mask: Maximum possible DMA mask
 * @rx_buffer_hash_size: Size of hash at start of RX packet
 * @rx_buffer_padding: Size of padding at end of RX packet
 * @can_rx_scatter: NIC is able to scatter packet to multiple buffers
 * @max_interrupt_mode: Highest capability interrupt mode supported
 *	from &enum efx_init_mode.
 * @timer_period_max: Maximum period of interrupt timer (in ticks)
 * @offload_features: net_device feature flags for protocol offload
 *	features implemented in hardware
 * @mcdi_max_ver: Maximum MCDI version supported
 * @max_rx_ip_filters: Maximum number of RX IP filters
 */
struct efx_nic_type {
	unsigned int (*mem_map_size)(struct efx_nic *efx);
	int (*probe)(struct efx_nic *efx);
	void (*remove)(struct efx_nic *efx);
	int (*init)(struct efx_nic *efx);
	void (*dimension_resources)(struct efx_nic *efx);
	void (*fini)(struct efx_nic *efx);
	void (*monitor)(struct efx_nic *efx);
	enum reset_type (*map_reset_reason)(enum reset_type reason);
	int (*map_reset_flags)(u32 *flags);
	int (*reset)(struct efx_nic *efx, enum reset_type method);
	int (*probe_port)(struct efx_nic *efx);
	void (*remove_port)(struct efx_nic *efx);
	bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
	int (*fini_dmaq)(struct efx_nic *efx);
	void (*prepare_flush)(struct efx_nic *efx);
	void (*finish_flush)(struct efx_nic *efx);
	void (*update_stats)(struct efx_nic *efx);
	void (*start_stats)(struct efx_nic *efx);
	void (*stop_stats)(struct efx_nic *efx);
	void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
	void (*push_irq_moderation)(struct efx_channel *channel);
	int (*reconfigure_port)(struct efx_nic *efx);
	void (*prepare_enable_fc_tx)(struct efx_nic *efx);
	int (*reconfigure_mac)(struct efx_nic *efx);
	bool (*check_mac_fault)(struct efx_nic *efx);
	void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
	int (*set_wol)(struct efx_nic *efx, u32 type);
	void (*resume_wol)(struct efx_nic *efx);
	int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
	int (*test_nvram)(struct efx_nic *efx);
	void (*mcdi_request)(struct efx_nic *efx,
			     const efx_dword_t *hdr, size_t hdr_len,
			     const efx_dword_t *sdu, size_t sdu_len);
	bool (*mcdi_poll_response)(struct efx_nic *efx);
	void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu,
				   size_t pdu_offset, size_t pdu_len);
	int (*mcdi_poll_reboot)(struct efx_nic *efx);
	void (*irq_enable_master)(struct efx_nic *efx);
	void (*irq_test_generate)(struct efx_nic *efx);
	void (*irq_disable_non_ev)(struct efx_nic *efx);
	irqreturn_t (*irq_handle_msi)(int irq, void *dev_id);
	irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id);
	int (*tx_probe)(struct efx_tx_queue *tx_queue);
	void (*tx_init)(struct efx_tx_queue *tx_queue);
	void (*tx_remove)(struct efx_tx_queue *tx_queue);
	void (*tx_write)(struct efx_tx_queue *tx_queue);
	void (*rx_push_indir_table)(struct efx_nic *efx);
	int (*rx_probe)(struct efx_rx_queue *rx_queue);
	void (*rx_init)(struct efx_rx_queue *rx_queue);
	void (*rx_remove)(struct efx_rx_queue *rx_queue);
	void (*rx_write)(struct efx_rx_queue *rx_queue);
	void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
	int (*ev_probe)(struct efx_channel *channel);
	void (*ev_init)(struct efx_channel *channel);
	void (*ev_fini)(struct efx_channel *channel);
	void (*ev_remove)(struct efx_channel *channel);
	int (*ev_process)(struct efx_channel *channel, int quota);
	void (*ev_read_ack)(struct efx_channel *channel);
	void (*ev_test_generate)(struct efx_channel *channel);
	int (*filter_table_probe)(struct efx_nic *efx);
	void (*filter_table_restore)(struct efx_nic *efx);
	void (*filter_table_remove)(struct efx_nic *efx);
	void (*filter_update_rx_scatter)(struct efx_nic *efx);
	s32 (*filter_insert)(struct efx_nic *efx,
			     struct efx_filter_spec *spec, bool replace);
	int (*filter_remove_safe)(struct efx_nic *efx,
				  enum efx_filter_priority priority,
				  u32 filter_id);
	int (*filter_get_safe)(struct efx_nic *efx,
			       enum efx_filter_priority priority,
			       u32 filter_id, struct efx_filter_spec *);
	void (*filter_clear_rx)(struct efx_nic *efx,
				enum efx_filter_priority priority);
	u32 (*filter_count_rx_used)(struct efx_nic *efx,
				    enum efx_filter_priority priority);
	u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
	s32 (*filter_get_rx_ids)(struct efx_nic *efx,
				 enum efx_filter_priority priority,
				 u32 *buf, u32 size);
#ifdef CONFIG_RFS_ACCEL
	s32 (*filter_rfs_insert)(struct efx_nic *efx,
				 struct efx_filter_spec *spec);
	bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id,
				      unsigned int index);
#endif
#ifdef CONFIG_SFC_MTD
	int (*mtd_probe)(struct efx_nic *efx);
	void (*mtd_rename)(struct efx_mtd_partition *part);
	int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len,
			size_t *retlen, u8 *buffer);
	int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len);
	int (*mtd_write)(struct mtd_info *mtd, loff_t start, size_t len,
			 size_t *retlen, const u8 *buffer);
	int (*mtd_sync)(struct mtd_info *mtd);
#endif

	int revision;
	unsigned int txd_ptr_tbl_base;
	unsigned int rxd_ptr_tbl_base;
	unsigned int buf_tbl_base;
	unsigned int evq_ptr_tbl_base;
	unsigned int evq_rptr_tbl_base;
	u64 max_dma_mask;
	unsigned int rx_buffer_hash_size;
	unsigned int rx_buffer_padding;
	bool can_rx_scatter;
	unsigned int max_interrupt_mode;
	unsigned int timer_period_max;
	netdev_features_t offload_features;
	int mcdi_max_ver;
	unsigned int max_rx_ip_filters;
};

/**************************************************************************
 *
 * Prototypes and inline functions
 *
 *************************************************************************/

static inline struct efx_channel *
efx_get_channel(struct efx_nic *efx, unsigned index)
{
	EFX_BUG_ON_PARANOID(index >= efx->n_channels);
	return efx->channel[index];
}

/* Iterate over all used channels */
#define efx_for_each_channel(_channel, _efx)				\
	for (_channel = (_efx)->channel[0];				\
	     _channel;							\
	     _channel = (_channel->channel + 1 < (_efx)->n_channels) ?	\
		     (_efx)->channel[_channel->channel + 1] : NULL)
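
/* Illustrative usage (not from this file):
 *
 *	struct efx_channel *channel;
 *	unsigned int n = 0;
 *
 *	efx_for_each_channel(channel, efx)
 *		++n;
 */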

/* Iterate over all used channels in reverse */
#define efx_for_each_channel_rev(_channel, _efx)			\
	for (_channel = (_efx)->channel[(_efx)->n_channels - 1];	\
	     _channel;							\
	     _channel = _channel->channel ?				\
		     (_efx)->channel[_channel->channel - 1] : NULL)

static inline struct efx_tx_queue *
efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
{
	EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
			    type >= EFX_TXQ_TYPES);
	return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
}

static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
		channel->efx->n_tx_channels;
}

static inline struct efx_tx_queue *
efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
{
	EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
			    type >= EFX_TXQ_TYPES);
	return &channel->tx_queue[type];
}

static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
{
	return !(tx_queue->efx->net_dev->num_tc < 2 &&
		 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
}

/* Iterate over all TX queues belonging to a channel */
#define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
	if (!efx_channel_has_tx_queues(_channel))			\
		;							\
	else								\
		for (_tx_queue = (_channel)->tx_queue;			\
		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
			     efx_tx_queue_used(_tx_queue);		\
		     _tx_queue++)

/* Iterate over all possible TX queues belonging to a channel */
#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel)	\
	if (!efx_channel_has_tx_queues(_channel))			\
		;							\
	else								\
		for (_tx_queue = (_channel)->tx_queue;			\
		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;	\
		     _tx_queue++)

static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
{
	return channel->rx_queue.core_index >= 0;
}

static inline struct efx_rx_queue *
efx_channel_get_rx_queue(struct efx_channel *channel)
{
	EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
	return &channel->rx_queue;
}

/* Iterate over all RX queues belonging to a channel */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel)		\
	if (!efx_channel_has_rx_queue(_channel))			\
		;							\
	else								\
		for (_rx_queue = &(_channel)->rx_queue;			\
		     _rx_queue;						\
		     _rx_queue = NULL)

static inline struct efx_channel *
efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
{
	return container_of(rx_queue, struct efx_channel, rx_queue);
}

static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
{
	return efx_rx_queue_channel(rx_queue)->channel;
}

/* Returns a pointer to the specified receive buffer in the RX
 * descriptor queue.
 */
static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
						  unsigned int index)
{
	return &rx_queue->buffer[index];
}


/**
 * EFX_MAX_FRAME_LEN - calculate maximum frame length
 *
 * This calculates the maximum frame length that will be used for a
 * given MTU.  The frame length will be equal to the MTU plus a
 * constant amount of header space and padding.  This is the quantity
 * that the net driver will program into the MAC as the maximum frame
 * length.
 *
 * The 10G MAC requires 8-byte alignment on the frame
 * length, so we round up to the nearest 8.
 *
 * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
 * XGMII cycle).  If the frame length reaches the maximum value in the
 * same cycle, the XMAC can miss the IPG altogether.  We work around
 * this by adding a further 16 bytes.
 */
#define EFX_MAX_FRAME_LEN(mtu) \
	((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16)
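
/* Worked example (illustrative): for mtu = 1500,
 * 1500 + 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 4 (FCS) = 1522; rounding up to
 * a multiple of 8 gives 1528, and the 16-byte IPG workaround yields 1544.
 */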

static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
{
	return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
}
static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
{
	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}

#endif /* EFX_NET_DRIVER_H */