/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

/* Common definitions for all Efx net driver code */

#ifndef EFX_NET_DRIVER_H
#define EFX_NET_DRIVER_H

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/timer.h>
#include <linux/mdio.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/i2c.h>

#include "enum.h"
#include "bitfield.h"

/**************************************************************************
 *
 * Build definitions
 *
 **************************************************************************/

#define EFX_DRIVER_VERSION	"3.2"

#ifdef DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
#else
#define EFX_BUG_ON_PARANOID(x) do {} while (0)
#define EFX_WARN_ON_PARANOID(x) do {} while (0)
#endif

/**************************************************************************
 *
 * Efx data structures
 *
 **************************************************************************/

#define EFX_MAX_CHANNELS 32U
#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
#define EFX_EXTRA_CHANNEL_IOV	0
#define EFX_EXTRA_CHANNEL_PTP	1
#define EFX_MAX_EXTRA_CHANNELS	2U

/* Checksum generation is a per-queue option in hardware, so each
 * queue visible to the networking core is backed by two hardware TX
 * queues. */
#define EFX_MAX_TX_TC		2
#define EFX_MAX_CORE_TX_QUEUES	(EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
#define EFX_TXQ_TYPE_OFFLOAD	1	/* flag */
#define EFX_TXQ_TYPE_HIGHPRI	2	/* flag */
#define EFX_TXQ_TYPES		4
#define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
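
/* Illustrative note (not part of the hardware interface): a channel's
 * tx_queue[] array is indexed by combinations of the EFX_TXQ_TYPE_*
 * flags, so each channel owns EFX_TXQ_TYPES == 4 queues, e.g.
 *
 *	tx_queue[0]                                    - no offload, normal priority
 *	tx_queue[EFX_TXQ_TYPE_OFFLOAD]                 - checksum offload
 *	tx_queue[EFX_TXQ_TYPE_HIGHPRI]                 - high priority
 *	tx_queue[EFX_TXQ_TYPE_OFFLOAD | EFX_TXQ_TYPE_HIGHPRI]
 */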

/* Maximum possible MTU the driver supports */
#define EFX_MAX_MTU (9 * 1024)

/* Size of an RX scatter buffer.  Small enough to pack 2 into a 4K page. */
#define EFX_RX_USR_BUF_SIZE 1824
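
/* Worked example of the sizing above (illustrative): two 1824-byte
 * buffers occupy 3648 bytes, leaving 448 bytes of a 4096-byte page for
 * the struct efx_rx_page_state header and per-buffer alignment.
 */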

/* Forward declare Precision Time Protocol (PTP) support structure. */
struct efx_ptp_data;

struct efx_self_tests;

/**
 * struct efx_special_buffer - An Efx special buffer
 * @addr: CPU base address of the buffer
 * @dma_addr: DMA base address of the buffer
 * @len: Buffer length, in bytes
 * @index: Buffer index within controller's buffer table
 * @entries: Number of buffer table entries
 *
 * Special buffers are used for the event queues and the TX and RX
 * descriptor queues for each channel.  They are *not* used for the
 * actual transmit and receive buffers.
 */
struct efx_special_buffer {
	void *addr;
	dma_addr_t dma_addr;
	unsigned int len;
	unsigned int index;
	unsigned int entries;
};

/**
 * struct efx_tx_buffer - buffer state for a TX descriptor
 * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
 *	freed when descriptor completes
 * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
 *	freed when descriptor completes.
 * @dma_addr: DMA address of the fragment.
 * @flags: Flags for allocation and DMA mapping type
 * @len: Length of this fragment.
 *	This field is zero when the queue slot is empty.
 * @unmap_len: Length of this fragment to unmap
 */
struct efx_tx_buffer {
	union {
		const struct sk_buff *skb;
		void *heap_buf;
	};
	dma_addr_t dma_addr;
	unsigned short flags;
	unsigned short len;
	unsigned short unmap_len;
};
#define EFX_TX_BUF_CONT		1	/* not last descriptor of packet */
#define EFX_TX_BUF_SKB		2	/* buffer is last part of skb */
#define EFX_TX_BUF_HEAP		4	/* buffer was allocated with kmalloc() */
#define EFX_TX_BUF_MAP_SINGLE	8	/* buffer was mapped with dma_map_single() */

/**
 * struct efx_tx_queue - An Efx TX queue
 *
 * This is a ring buffer of TX fragments.
 * Since the TX completion path always executes on the same
 * CPU and the xmit path can operate on different CPUs,
 * performance is increased by ensuring that the completion
 * path and the xmit path operate on different cache lines.
 * This is particularly important if the xmit path is always
 * executing on one CPU which is different from the completion
 * path.  There is also a cache line for members which are
 * read but not written on the fast path.
 *
 * @efx: The associated Efx NIC
 * @queue: DMA queue number
 * @channel: The associated channel
 * @core_txq: The networking core TX queue structure
 * @buffer: The software buffer ring
 * @tsoh_page: Array of pages of TSO header buffers
 * @txd: The hardware descriptor ring
 * @ptr_mask: The size of the ring minus 1.
 * @initialised: Has hardware queue been initialised?
 * @read_count: Current read pointer.
 *	This is the number of buffers that have been removed from both rings.
 * @old_write_count: The value of @write_count when last checked.
 *	This is here for performance reasons.  The xmit path will
 *	only get the up-to-date value of @write_count if this
 *	variable indicates that the queue is empty.  This is to
 *	avoid cache-line ping-pong between the xmit path and the
 *	completion path.
 * @insert_count: Current insert pointer
 *	This is the number of buffers that have been added to the
 *	software ring.
 * @write_count: Current write pointer
 *	This is the number of buffers that have been added to the
 *	hardware ring.
 * @old_read_count: The value of read_count when last checked.
 *	This is here for performance reasons.  The xmit path will
 *	only get the up-to-date value of read_count if this
 *	variable indicates that the queue is full.  This is to
 *	avoid cache-line ping-pong between the xmit path and the
 *	completion path.
 * @tso_bursts: Number of times TSO xmit invoked by kernel
 * @tso_long_headers: Number of packets with headers too long for standard
 *	blocks
 * @tso_packets: Number of packets via the TSO xmit path
 * @pushes: Number of times the TX push feature has been used
 * @empty_read_count: If the completion path has seen the queue as empty
 *	and the transmission path has not yet checked this, the value of
 *	@read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
 */
struct efx_tx_queue {
	/* Members which don't change on the fast path */
	struct efx_nic *efx ____cacheline_aligned_in_smp;
	unsigned queue;
	struct efx_channel *channel;
	struct netdev_queue *core_txq;
	struct efx_tx_buffer *buffer;
	struct efx_buffer *tsoh_page;
	struct efx_special_buffer txd;
	unsigned int ptr_mask;
	bool initialised;

	/* Members used mainly on the completion path */
	unsigned int read_count ____cacheline_aligned_in_smp;
	unsigned int old_write_count;

	/* Members used only on the xmit path */
	unsigned int insert_count ____cacheline_aligned_in_smp;
	unsigned int write_count;
	unsigned int old_read_count;
	unsigned int tso_bursts;
	unsigned int tso_long_headers;
	unsigned int tso_packets;
	unsigned int pushes;

	/* Members shared between paths and sometimes updated */
	unsigned int empty_read_count ____cacheline_aligned_in_smp;
#define EFX_EMPTY_COUNT_VALID 0x80000000
	atomic_t flush_outstanding;
};

/**
 * struct efx_rx_buffer - An Efx RX data buffer
 * @dma_addr: DMA base address of the buffer
 * @page: The associated page buffer.
 *	Will be %NULL if the buffer slot is currently free.
 * @page_offset: If pending: offset in @page of DMA base address.
 *	If completed: offset in @page of Ethernet header.
 * @len: If pending: length for DMA descriptor.
 *	If completed: received length, excluding hash prefix.
 * @flags: Flags for buffer and packet state.  These are only set on the
 *	first buffer of a scattered packet.
 */
struct efx_rx_buffer {
	dma_addr_t dma_addr;
	struct page *page;
	u16 page_offset;
	u16 len;
	u16 flags;
};
#define EFX_RX_PKT_CSUMMED	0x0002
#define EFX_RX_PKT_DISCARD	0x0004

/**
 * struct efx_rx_page_state - Page-based rx buffer state
 *
 * Inserted at the start of every page allocated for receive buffers.
 * Used to facilitate sharing dma mappings between recycled rx buffers
 * and those passed up to the kernel.
 *
 * @refcnt: Number of struct efx_rx_buffer's referencing this page.
 *	When refcnt falls to zero, the page is unmapped for dma
 * @dma_addr: The dma address of this page.
 */
struct efx_rx_page_state {
	unsigned refcnt;
	dma_addr_t dma_addr;

	unsigned int __pad[0] ____cacheline_aligned;
};

/**
 * struct efx_rx_queue - An Efx RX queue
 * @efx: The associated Efx NIC
 * @core_index:  Index of network core RX queue.  Will be >= 0 iff this
 *	is associated with a real RX queue.
 * @buffer: The software buffer ring
 * @rxd: The hardware descriptor ring
 * @ptr_mask: The size of the ring minus 1.
 * @enabled: Receive queue enabled indicator.
 * @flush_pending: Set when a RX flush is pending. Has the same lifetime as
 *	@rxq_flush_pending.
 * @added_count: Number of buffers added to the receive queue.
 * @notified_count: Number of buffers given to NIC (<= @added_count).
 * @removed_count: Number of buffers removed from the receive queue.
 * @scatter_n: Number of buffers used by current packet
 * @page_ring: The ring to store DMA mapped pages for reuse.
 * @page_add: Counter to calculate the write pointer for the recycle ring.
 * @page_remove: Counter to calculate the read pointer for the recycle ring.
 * @page_recycle_count: The number of pages that have been recycled.
 * @page_recycle_failed: The number of pages that couldn't be recycled because
 *      the kernel still held a reference to them.
 * @page_recycle_full: The number of pages that were released because the
 *      recycle ring was full.
 * @page_ptr_mask: The number of pages in the RX recycle ring minus 1.
 * @max_fill: RX descriptor maximum fill level (<= ring size)
 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
 *	(<= @max_fill)
 * @min_fill: RX descriptor minimum non-zero fill level.
 *	This records the minimum fill level observed when a ring
 *	refill was triggered.
 * @recycle_count: RX buffer recycle counter.
 * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
 */
struct efx_rx_queue {
	struct efx_nic *efx;
	int core_index;
	struct efx_rx_buffer *buffer;
	struct efx_special_buffer rxd;
	unsigned int ptr_mask;
	bool enabled;
	bool flush_pending;

	unsigned int added_count;
	unsigned int notified_count;
	unsigned int removed_count;
	unsigned int scatter_n;
	struct page **page_ring;
	unsigned int page_add;
	unsigned int page_remove;
	unsigned int page_recycle_count;
	unsigned int page_recycle_failed;
	unsigned int page_recycle_full;
	unsigned int page_ptr_mask;
	unsigned int max_fill;
	unsigned int fast_fill_trigger;
	unsigned int min_fill;
	unsigned int min_overfill;
	unsigned int recycle_count;
	struct timer_list slow_fill;
	unsigned int slow_fill_count;
};

/**
 * struct efx_buffer - An Efx general-purpose buffer
 * @addr: host base address of the buffer
 * @dma_addr: DMA base address of the buffer
 * @len: Buffer length, in bytes
 *
 * The NIC uses these buffers for its interrupt status registers and
 * MAC stats dumps.
 */
struct efx_buffer {
	void *addr;
	dma_addr_t dma_addr;
	unsigned int len;
};


enum efx_rx_alloc_method {
	RX_ALLOC_METHOD_AUTO = 0,
	RX_ALLOC_METHOD_SKB = 1,
	RX_ALLOC_METHOD_PAGE = 2,
};

/**
 * struct efx_channel - An Efx channel
 *
 * A channel comprises an event queue, at least one TX queue, at least
 * one RX queue, and an associated tasklet for processing the event
 * queue.
 *
 * @efx: Associated Efx NIC
 * @channel: Channel instance number
 * @type: Channel type definition
 * @enabled: Channel enabled indicator
 * @irq: IRQ number (MSI and MSI-X only)
 * @irq_moderation: IRQ moderation value (in hardware ticks)
 * @napi_dev: Net device used with NAPI
 * @napi_str: NAPI control structure
 * @work_pending: Is work pending via NAPI?
 * @eventq: Event queue buffer
 * @eventq_mask: Event queue pointer mask
 * @eventq_read_ptr: Event queue read pointer
 * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
 * @irq_count: Number of IRQs since last adaptive moderation decision
 * @irq_mod_score: IRQ moderation score
 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
 * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
 * @n_rx_mcast_mismatch: Count of unmatched multicast frames
 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
 * @n_rx_overlength: Count of RX_OVERLENGTH errors
 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
 * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
 *	lack of descriptors
 * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
 *	__efx_rx_packet(), or zero if there is none
 * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
 *	by __efx_rx_packet(), if @rx_pkt_n_frags != 0
 * @rx_queue: RX queue for this channel
 * @tx_queue: TX queues for this channel
 */
struct efx_channel {
	struct efx_nic *efx;
	int channel;
	const struct efx_channel_type *type;
	bool enabled;
	int irq;
	unsigned int irq_moderation;
	struct net_device *napi_dev;
	struct napi_struct napi_str;
	bool work_pending;
	struct efx_special_buffer eventq;
	unsigned int eventq_mask;
	unsigned int eventq_read_ptr;
	int event_test_cpu;

	unsigned int irq_count;
	unsigned int irq_mod_score;
#ifdef CONFIG_RFS_ACCEL
	unsigned int rfs_filters_added;
#endif

	unsigned n_rx_tobe_disc;
	unsigned n_rx_ip_hdr_chksum_err;
	unsigned n_rx_tcp_udp_chksum_err;
	unsigned n_rx_mcast_mismatch;
	unsigned n_rx_frm_trunc;
	unsigned n_rx_overlength;
	unsigned n_skbuff_leaks;
	unsigned int n_rx_nodesc_trunc;

	unsigned int rx_pkt_n_frags;
	unsigned int rx_pkt_index;

	struct efx_rx_queue rx_queue;
	struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
};

/**
 * struct efx_channel_type - distinguishes traffic and extra channels
 * @handle_no_channel: Handle failure to allocate an extra channel
 * @pre_probe: Set up extra state prior to initialisation
 * @post_remove: Tear down extra state after finalisation, if allocated.
 *	May be called on channels that have not been probed.
 * @get_name: Generate the channel's name (used for its IRQ handler)
 * @copy: Copy the channel state prior to reallocation.  May be %NULL if
 *	reallocation is not supported.
 * @receive_skb: Handle an skb ready to be passed to netif_receive_skb()
 * @keep_eventq: Flag for whether event queue should be kept initialised
 *	while the device is stopped
 */
struct efx_channel_type {
	void (*handle_no_channel)(struct efx_nic *);
	int (*pre_probe)(struct efx_channel *);
	void (*post_remove)(struct efx_channel *);
	void (*get_name)(struct efx_channel *, char *buf, size_t len);
	struct efx_channel *(*copy)(const struct efx_channel *);
	bool (*receive_skb)(struct efx_channel *, struct sk_buff *);
	bool keep_eventq;
};

enum efx_led_mode {
	EFX_LED_OFF	= 0,
	EFX_LED_ON	= 1,
	EFX_LED_DEFAULT	= 2
};

#define STRING_TABLE_LOOKUP(val, member) \
	((val) < member ## _max) ? member ## _names[val] : "(invalid)"

extern const char *const efx_loopback_mode_names[];
extern const unsigned int efx_loopback_mode_max;
#define LOOPBACK_MODE(efx) \
	STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)

extern const char *const efx_reset_type_names[];
extern const unsigned int efx_reset_type_max;
#define RESET_TYPE(type) \
	STRING_TABLE_LOOKUP(type, efx_reset_type)
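
/* Illustrative use of the lookup macros above in a log message (the
 * efx and method variables are assumed from the calling context):
 *
 *	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
 *		   RESET_TYPE(method));
 */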

enum efx_int_mode {
	/* Be careful if altering to correct macro below */
	EFX_INT_MODE_MSIX = 0,
	EFX_INT_MODE_MSI = 1,
	EFX_INT_MODE_LEGACY = 2,
	EFX_INT_MODE_MAX	/* Insert any new items before this */
};
#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)

enum nic_state {
	STATE_UNINIT = 0,	/* device being probed/removed or is frozen */
	STATE_READY = 1,	/* hardware ready and netdev registered */
	STATE_DISABLED = 2,	/* device disabled due to hardware errors */
	STATE_RECOVERY = 3,	/* device recovering from PCI error */
};

/*
 * Alignment of page-allocated RX buffers
 *
 * Controls the number of bytes inserted at the start of an RX buffer.
 * This is the equivalent of NET_IP_ALIGN [which controls the alignment
 * of the skb->head for hardware DMA].
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define EFX_PAGE_IP_ALIGN 0
#else
#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
#endif

/*
 * Alignment of the skb->head which wraps a page-allocated RX buffer
 *
 * The skb allocated to wrap an rx_buffer can have this alignment. Since
 * the data is memcpy'd from the rx_buf, it does not need to be equal to
 * EFX_PAGE_IP_ALIGN.
 */
#define EFX_PAGE_SKB_ALIGN 2

/* Forward declaration */
struct efx_nic;

/* Pseudo bit-mask flow control field */
#define EFX_FC_RX	FLOW_CTRL_RX
#define EFX_FC_TX	FLOW_CTRL_TX
#define EFX_FC_AUTO	4

/**
 * struct efx_link_state - Current state of the link
 * @up: Link is up
 * @fd: Link is full-duplex
 * @fc: Actual flow control flags
 * @speed: Link speed (Mbps)
 */
struct efx_link_state {
	bool up;
	bool fd;
	u8 fc;
	unsigned int speed;
};

static inline bool efx_link_state_equal(const struct efx_link_state *left,
					const struct efx_link_state *right)
{
	return left->up == right->up && left->fd == right->fd &&
		left->fc == right->fc && left->speed == right->speed;
}

/**
 * struct efx_phy_operations - Efx PHY operations table
 * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds,
 *	efx->loopback_modes.
 * @init: Initialise PHY
 * @fini: Shut down PHY
 * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
 * @poll: Update @link_state and report whether it changed.
 *	Serialised by the mac_lock.
 * @get_settings: Get ethtool settings. Serialised by the mac_lock.
 * @set_settings: Set ethtool settings. Serialised by the mac_lock.
 * @set_npage_adv: Set abilities advertised in (Extended) Next Page
 *	(only needed where AN bit is set in mmds)
 * @test_alive: Test that PHY is 'alive' (online)
 * @test_name: Get the name of a PHY-specific test/result
 * @run_tests: Run tests and record results as appropriate (offline).
 *	Flags are the ethtool tests flags.
 */
struct efx_phy_operations {
	int (*probe) (struct efx_nic *efx);
	int (*init) (struct efx_nic *efx);
	void (*fini) (struct efx_nic *efx);
	void (*remove) (struct efx_nic *efx);
	int (*reconfigure) (struct efx_nic *efx);
	bool (*poll) (struct efx_nic *efx);
	void (*get_settings) (struct efx_nic *efx,
			      struct ethtool_cmd *ecmd);
	int (*set_settings) (struct efx_nic *efx,
			     struct ethtool_cmd *ecmd);
	void (*set_npage_adv) (struct efx_nic *efx, u32);
	int (*test_alive) (struct efx_nic *efx);
	const char *(*test_name) (struct efx_nic *efx, unsigned int index);
	int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
	int (*get_module_eeprom) (struct efx_nic *efx,
			       struct ethtool_eeprom *ee,
			       u8 *data);
	int (*get_module_info) (struct efx_nic *efx,
				struct ethtool_modinfo *modinfo);
};

/**
 * enum efx_phy_mode - PHY operating mode flags
 * @PHY_MODE_NORMAL: on and should pass traffic
 * @PHY_MODE_TX_DISABLED: on with TX disabled
 * @PHY_MODE_LOW_POWER: set to low power through MDIO
 * @PHY_MODE_OFF: switched off through external control
 * @PHY_MODE_SPECIAL: on but will not pass traffic
 */
enum efx_phy_mode {
	PHY_MODE_NORMAL		= 0,
	PHY_MODE_TX_DISABLED	= 1,
	PHY_MODE_LOW_POWER	= 2,
	PHY_MODE_OFF		= 4,
	PHY_MODE_SPECIAL	= 8,
};

static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
{
	return !!(mode & ~PHY_MODE_TX_DISABLED);
}

/*
 * Efx extended statistics
 *
 * Not all statistics are provided by all supported MACs.  The purpose
 * of this structure is to contain the raw statistics provided by each
 * MAC.
 */
struct efx_mac_stats {
	u64 tx_bytes;
	u64 tx_good_bytes;
	u64 tx_bad_bytes;
	u64 tx_packets;
	u64 tx_bad;
	u64 tx_pause;
	u64 tx_control;
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
	u64 tx_lt64;
	u64 tx_64;
	u64 tx_65_to_127;
	u64 tx_128_to_255;
	u64 tx_256_to_511;
	u64 tx_512_to_1023;
	u64 tx_1024_to_15xx;
	u64 tx_15xx_to_jumbo;
	u64 tx_gtjumbo;
	u64 tx_collision;
	u64 tx_single_collision;
	u64 tx_multiple_collision;
	u64 tx_excessive_collision;
	u64 tx_deferred;
	u64 tx_late_collision;
	u64 tx_excessive_deferred;
	u64 tx_non_tcpudp;
	u64 tx_mac_src_error;
	u64 tx_ip_src_error;
	u64 rx_bytes;
	u64 rx_good_bytes;
	u64 rx_bad_bytes;
	u64 rx_packets;
	u64 rx_good;
	u64 rx_bad;
	u64 rx_pause;
	u64 rx_control;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_lt64;
	u64 rx_64;
	u64 rx_65_to_127;
	u64 rx_128_to_255;
	u64 rx_256_to_511;
	u64 rx_512_to_1023;
	u64 rx_1024_to_15xx;
	u64 rx_15xx_to_jumbo;
	u64 rx_gtjumbo;
	u64 rx_bad_lt64;
	u64 rx_bad_64_to_15xx;
	u64 rx_bad_15xx_to_jumbo;
	u64 rx_bad_gtjumbo;
	u64 rx_overflow;
	u64 rx_missed;
	u64 rx_false_carrier;
	u64 rx_symbol_error;
	u64 rx_align_error;
	u64 rx_length_error;
	u64 rx_internal_error;
	u64 rx_good_lt64;
};

/* Number of bits used in a multicast filter hash address */
#define EFX_MCAST_HASH_BITS 8

/* Number of (single-bit) entries in a multicast filter hash */
#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)

/* An Efx multicast filter hash */
union efx_multicast_hash {
	u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
	efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};

struct efx_filter_state;
struct efx_vf;
struct vfdi_status;

/**
 * struct efx_nic - an Efx NIC
 * @name: Device name (net device name or bus id before net device registered)
 * @pci_dev: The PCI device
 * @type: Controller type attributes
 * @legacy_irq: IRQ number
 * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
 * @workqueue: Workqueue for port reconfigures and the HW monitor.
 *	Work items do not hold and must not acquire RTNL.
 * @workqueue_name: Name of workqueue
 * @reset_work: Scheduled reset workitem
 * @membase_phys: Memory BAR value as physical address
 * @membase: Memory BAR value
 * @interrupt_mode: Interrupt mode
 * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
 * @irq_rx_moderation: IRQ moderation time for RX event queues
 * @msg_enable: Log message enable flags
 * @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
 * @reset_pending: Bitmask for pending resets
 * @tx_queue: TX DMA queues
 * @rx_queue: RX DMA queues
 * @channel: Channels
 * @channel_name: Names for channels and their IRQs
 * @extra_channel_types: Types of extra (non-traffic) channels that
 *	should be allocated for this NIC
 * @rxq_entries: Size of receive queues requested by user.
 * @txq_entries: Size of transmit queues requested by user.
 * @txq_stop_thresh: TX queue fill level at or above which we stop it.
 * @txq_wake_thresh: TX queue fill level at or below which we wake it.
 * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
 * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
 * @sram_lim_qw: Qword address limit of SRAM
 * @next_buffer_table: First available buffer table id
 * @n_channels: Number of channels in use
 * @n_rx_channels: Number of channels used for RX (= number of RX queues)
 * @n_tx_channels: Number of channels used for TX
 * @rx_dma_len: Current maximum RX DMA length
 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
 * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
 *	for use in sk_buff::truesize
 * @rx_hash_key: Toeplitz hash key for RSS
 * @rx_indir_table: Indirection table for RSS
 * @rx_scatter: Scatter mode enabled for receives
 * @int_error_count: Number of internal errors seen recently
 * @int_error_expire: Time at which error count will be expired
 * @irq_status: Interrupt status buffer
 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
 * @irq_level: IRQ level/index for IRQs not triggered by an event queue
 * @selftest_work: Work item for asynchronous self-test
 * @mtd_list: List of MTDs attached to the NIC
 * @nic_data: Hardware dependent state
 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
 *	efx_monitor() and efx_reconfigure_port()
 * @port_enabled: Port enabled indicator.
 *	Serialises efx_stop_all(), efx_start_all(), efx_monitor() and
 *	efx_mac_work() with kernel interfaces. Safe to read under any
 *	one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
 *	be held to modify it.
 * @port_initialized: Port initialized?
 * @net_dev: Operating system network device. Consider holding the rtnl lock
 * @stats_buffer: DMA buffer for statistics
 * @phy_type: PHY type
 * @phy_op: PHY interface
 * @phy_data: PHY private data (including PHY-specific stats)
 * @mdio: PHY MDIO interface
 * @mdio_bus: PHY MDIO bus ID (only used by Siena)
 * @phy_mode: PHY operating mode. Serialised by @mac_lock.
 * @link_advertising: Autonegotiation advertising flags
 * @link_state: Current state of the link
 * @n_link_state_changes: Number of times the link has changed state
 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
 * @multicast_hash: Multicast hash table
 * @wanted_fc: Wanted flow control flags
 * @fc_disable: When non-zero flow control is disabled. Typically used to
 *	ensure that network back pressure doesn't delay dma queue flushes.
 *	Serialised by the rtnl lock.
 * @mac_work: Work item for changing MAC promiscuity and multicast hash
 * @loopback_mode: Loopback status
 * @loopback_modes: Supported loopback mode bitmask
 * @loopback_selftest: Offline self-test private state
 * @drain_pending: Count of RX and TX queues that haven't been flushed and drained.
 * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
 *	Decremented when the efx_flush_rx_queue() is called.
 * @rxq_flush_outstanding: Count of number of RX flushes started but not yet
 *	completed (either success or failure). Not used when MCDI is used to
 *	flush receive queues.
 * @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions.
 * @vf: Array of &struct efx_vf objects.
 * @vf_count: Number of VFs intended to be enabled.
 * @vf_init_count: Number of VFs that have been fully initialised.
 * @vi_scale: log2 number of vnics per VF.
 * @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
 * @vfdi_status: Common VFDI status page to be dmad to VF address space.
 * @local_addr_list: List of local addresses. Protected by %local_lock.
 * @local_page_list: List of DMA addressable pages used to broadcast
 *	%local_addr_list. Protected by %local_lock.
 * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
 * @peer_work: Work item to broadcast peer addresses to VMs.
 * @ptp_data: PTP state data
 * @monitor_work: Hardware monitor workitem
 * @biu_lock: BIU (bus interface unit) lock
 * @last_irq_cpu: Last CPU to handle a possible test interrupt.  This
 *	field is used by efx_test_interrupts() to verify that an
 *	interrupt has occurred.
 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
 * @mac_stats: MAC statistics. These include all statistics the MACs
 *	can provide.  Generic code converts these into a standard
 *	&struct net_device_stats.
 * @stats_lock: Statistics update lock. Serialises statistics fetches
 *	and access to @mac_stats.
 *
 * This is stored in the private area of the &struct net_device.
 */
struct efx_nic {
	/* The following fields should be written very rarely */

	char name[IFNAMSIZ];
	struct pci_dev *pci_dev;
	const struct efx_nic_type *type;
	int legacy_irq;
	bool legacy_irq_enabled;
	struct workqueue_struct *workqueue;
	char workqueue_name[16];
	struct work_struct reset_work;
	resource_size_t membase_phys;
	void __iomem *membase;

	enum efx_int_mode interrupt_mode;
	unsigned int timer_quantum_ns;
	bool irq_rx_adaptive;
	unsigned int irq_rx_moderation;
	u32 msg_enable;

	enum nic_state state;
	unsigned long reset_pending;

	struct efx_channel *channel[EFX_MAX_CHANNELS];
	char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6];
	const struct efx_channel_type *
	extra_channel_type[EFX_MAX_EXTRA_CHANNELS];

	unsigned rxq_entries;
	unsigned txq_entries;
	unsigned int txq_stop_thresh;
	unsigned int txq_wake_thresh;

	unsigned tx_dc_base;
	unsigned rx_dc_base;
	unsigned sram_lim_qw;
	unsigned next_buffer_table;
	unsigned n_channels;
	unsigned n_rx_channels;
	unsigned rss_spread;
	unsigned tx_channel_offset;
	unsigned n_tx_channels;
	unsigned int rx_dma_len;
	unsigned int rx_buffer_order;
	unsigned int rx_buffer_truesize;
	unsigned int rx_bufs_per_page;
	u8 rx_hash_key[40];
	u32 rx_indir_table[128];
	bool rx_scatter;

	unsigned int_error_count;
	unsigned long int_error_expire;

	struct efx_buffer irq_status;
	unsigned irq_zero_count;
	unsigned irq_level;
	struct delayed_work selftest_work;

#ifdef CONFIG_SFC_MTD
	struct list_head mtd_list;
#endif

	void *nic_data;

	struct mutex mac_lock;
	struct work_struct mac_work;
	bool port_enabled;

	bool port_initialized;
	struct net_device *net_dev;

	struct efx_buffer stats_buffer;

	unsigned int phy_type;
	const struct efx_phy_operations *phy_op;
	void *phy_data;
	struct mdio_if_info mdio;
	unsigned int mdio_bus;
	enum efx_phy_mode phy_mode;

	u32 link_advertising;
	struct efx_link_state link_state;
	unsigned int n_link_state_changes;

	bool promiscuous;
	union efx_multicast_hash multicast_hash;
	u8 wanted_fc;
	unsigned fc_disable;

	atomic_t rx_reset;
	enum efx_loopback_mode loopback_mode;
	u64 loopback_modes;

	void *loopback_selftest;

	struct efx_filter_state *filter_state;

	atomic_t drain_pending;
	atomic_t rxq_flush_pending;
	atomic_t rxq_flush_outstanding;
	wait_queue_head_t flush_wq;

#ifdef CONFIG_SFC_SRIOV
	struct efx_channel *vfdi_channel;
	struct efx_vf *vf;
	unsigned vf_count;
	unsigned vf_init_count;
	unsigned vi_scale;
	unsigned vf_buftbl_base;
	struct efx_buffer vfdi_status;
	struct list_head local_addr_list;
	struct list_head local_page_list;
	struct mutex local_lock;
	struct work_struct peer_work;
#endif

	struct efx_ptp_data *ptp_data;

	/* The following fields may be written more often */

	struct delayed_work monitor_work ____cacheline_aligned_in_smp;
	spinlock_t biu_lock;
	int last_irq_cpu;
	unsigned n_rx_nodesc_drop_cnt;
	struct efx_mac_stats mac_stats;
	spinlock_t stats_lock;
};

static inline int efx_dev_registered(struct efx_nic *efx)
{
	return efx->net_dev->reg_state == NETREG_REGISTERED;
}

static inline unsigned int efx_port_num(struct efx_nic *efx)
{
	return efx->net_dev->dev_id;
}

/**
 * struct efx_nic_type - Efx device type definition
 * @probe: Probe the controller
 * @remove: Free resources allocated by probe()
 * @init: Initialise the controller
 * @dimension_resources: Dimension controller resources (buffer table,
 *	and VIs once the available interrupt resources are clear)
 * @fini: Shut down the controller
 * @monitor: Periodic function for polling link state and hardware monitor
 * @map_reset_reason: Map ethtool reset reason to a reset method
 * @map_reset_flags: Map ethtool reset flags to a reset method, if possible
 * @reset: Reset the controller hardware and possibly the PHY.  This will
 *	be called while the controller is uninitialised.
 * @probe_port: Probe the MAC and PHY
 * @remove_port: Free resources allocated by probe_port()
 * @handle_global_event: Handle a "global" event (may be %NULL)
 * @prepare_flush: Prepare the hardware for flushing the DMA queues
 * @finish_flush: Clean up after flushing the DMA queues
 * @update_stats: Update statistics not provided by event handling
 * @start_stats: Start the regular fetching of statistics
 * @stop_stats: Stop the regular fetching of statistics
 * @set_id_led: Set state of identifying LED or revert to automatic function
 * @push_irq_moderation: Apply interrupt moderation value
 * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
 * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings
 *	to the hardware.  Serialised by the mac_lock.
 * @check_mac_fault: Check MAC fault state. True if fault present.
 * @get_wol: Get WoL configuration from driver state
 * @set_wol: Push WoL configuration to the NIC
 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
 * @test_chip: Test registers.  Should use efx_nic_test_registers(), and is
 *	expected to reset the NIC.
 * @test_nvram: Test validity of NVRAM contents
 * @revision: Hardware architecture revision
 * @mem_map_size: Memory BAR mapped size
 * @txd_ptr_tbl_base: TX descriptor ring base address
 * @rxd_ptr_tbl_base: RX descriptor ring base address
 * @buf_tbl_base: Buffer table base address
 * @evq_ptr_tbl_base: Event queue pointer table base address
 * @evq_rptr_tbl_base: Event queue read-pointer table base address
 * @max_dma_mask: Maximum possible DMA mask
 * @rx_buffer_hash_size: Size of hash at start of RX packet
 * @rx_buffer_padding: Size of padding at end of RX packet
 * @can_rx_scatter: NIC is able to scatter packet to multiple buffers
 * @max_interrupt_mode: Highest capability interrupt mode supported
 *	from &enum efx_init_mode.
 * @phys_addr_channels: Number of channels with physically addressed
 *	descriptors
 * @timer_period_max: Maximum period of interrupt timer (in ticks)
 * @offload_features: net_device feature flags for protocol offload
 *	features implemented in hardware
 */
struct efx_nic_type {
	int (*probe)(struct efx_nic *efx);
	void (*remove)(struct efx_nic *efx);
	int (*init)(struct efx_nic *efx);
	void (*dimension_resources)(struct efx_nic *efx);
	void (*fini)(struct efx_nic *efx);
	void (*monitor)(struct efx_nic *efx);
	enum reset_type (*map_reset_reason)(enum reset_type reason);
	int (*map_reset_flags)(u32 *flags);
	int (*reset)(struct efx_nic *efx, enum reset_type method);
	int (*probe_port)(struct efx_nic *efx);
	void (*remove_port)(struct efx_nic *efx);
	bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
	void (*prepare_flush)(struct efx_nic *efx);
	void (*finish_flush)(struct efx_nic *efx);
	void (*update_stats)(struct efx_nic *efx);
	void (*start_stats)(struct efx_nic *efx);
	void (*stop_stats)(struct efx_nic *efx);
	void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
	void (*push_irq_moderation)(struct efx_channel *channel);
	int (*reconfigure_port)(struct efx_nic *efx);
	int (*reconfigure_mac)(struct efx_nic *efx);
	bool (*check_mac_fault)(struct efx_nic *efx);
	void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
	int (*set_wol)(struct efx_nic *efx, u32 type);
	void (*resume_wol)(struct efx_nic *efx);
	int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
	int (*test_nvram)(struct efx_nic *efx);

	int revision;
	unsigned int mem_map_size;
	unsigned int txd_ptr_tbl_base;
	unsigned int rxd_ptr_tbl_base;
	unsigned int buf_tbl_base;
	unsigned int evq_ptr_tbl_base;
	unsigned int evq_rptr_tbl_base;
	u64 max_dma_mask;
	unsigned int rx_buffer_hash_size;
	unsigned int rx_buffer_padding;
	bool can_rx_scatter;
	unsigned int max_interrupt_mode;
	unsigned int phys_addr_channels;
	unsigned int timer_period_max;
	netdev_features_t offload_features;
};

/**************************************************************************
 *
 * Prototypes and inline functions
 *
 *************************************************************************/

static inline struct efx_channel *
efx_get_channel(struct efx_nic *efx, unsigned index)
{
	EFX_BUG_ON_PARANOID(index >= efx->n_channels);
	return efx->channel[index];
}

/* Iterate over all used channels */
#define efx_for_each_channel(_channel, _efx)				\
	for (_channel = (_efx)->channel[0];				\
	     _channel;							\
	     _channel = (_channel->channel + 1 < (_efx)->n_channels) ?	\
		     (_efx)->channel[_channel->channel + 1] : NULL)

/* Iterate over all used channels in reverse */
#define efx_for_each_channel_rev(_channel, _efx)			\
	for (_channel = (_efx)->channel[(_efx)->n_channels - 1];	\
	     _channel;							\
	     _channel = _channel->channel ?				\
		     (_efx)->channel[_channel->channel - 1] : NULL)

static inline struct efx_tx_queue *
efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
{
	EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
			    type >= EFX_TXQ_TYPES);
	return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
}

static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
		channel->efx->n_tx_channels;
}

static inline struct efx_tx_queue *
efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
{
	EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
			    type >= EFX_TXQ_TYPES);
	return &channel->tx_queue[type];
}

static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
{
	return !(tx_queue->efx->net_dev->num_tc < 2 &&
		 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
}

/* Iterate over all TX queues belonging to a channel */
#define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
	if (!efx_channel_has_tx_queues(_channel))			\
		;							\
	else								\
		for (_tx_queue = (_channel)->tx_queue;			\
		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
			     efx_tx_queue_used(_tx_queue);		\
		     _tx_queue++)
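
/* Illustrative use of the iterators above; efx_init_tx_queue() stands in
 * for any per-queue operation:
 *
 *	struct efx_channel *channel;
 *	struct efx_tx_queue *tx_queue;
 *
 *	efx_for_each_channel(channel, efx)
 *		efx_for_each_channel_tx_queue(tx_queue, channel)
 *			efx_init_tx_queue(tx_queue);
 */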

/* Iterate over all possible TX queues belonging to a channel */
#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel)	\
	if (!efx_channel_has_tx_queues(_channel))			\
		;							\
	else								\
		for (_tx_queue = (_channel)->tx_queue;			\
		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;	\
		     _tx_queue++)

static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
{
	return channel->rx_queue.core_index >= 0;
}

static inline struct efx_rx_queue *
efx_channel_get_rx_queue(struct efx_channel *channel)
{
	EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
	return &channel->rx_queue;
}

/* Iterate over all RX queues belonging to a channel */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel)		\
	if (!efx_channel_has_rx_queue(_channel))			\
		;							\
	else								\
		for (_rx_queue = &(_channel)->rx_queue;			\
		     _rx_queue;						\
		     _rx_queue = NULL)

static inline struct efx_channel *
efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
{
	return container_of(rx_queue, struct efx_channel, rx_queue);
}

static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
{
	return efx_rx_queue_channel(rx_queue)->channel;
}

/* Returns a pointer to the specified receive buffer in the RX
 * descriptor queue.
 */
static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
						  unsigned int index)
{
	return &rx_queue->buffer[index];
}


/**
 * EFX_MAX_FRAME_LEN - calculate maximum frame length
 *
 * This calculates the maximum frame length that will be used for a
 * given MTU.  The frame length will be equal to the MTU plus a
 * constant amount of header space and padding.  This is the quantity
 * that the net driver will program into the MAC as the maximum frame
 * length.
 *
 * The 10G MAC requires 8-byte alignment on the frame
 * length, so we round up to the nearest 8.
 *
 * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
 * XGMII cycle).  If the frame length reaches the maximum value in the
 * same cycle, the XMAC can miss the IPG altogether.  We work around
 * this by adding a further 16 bytes.
 */
#define EFX_MAX_FRAME_LEN(mtu) \
	((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16)
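
/* Worked example (illustrative): for mtu == 1500 this evaluates to
 * (1500 + 14 + 4 + 4 + 7) & ~7 = 1528, plus 16 = 1544 bytes.
 */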

static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
{
	return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
}
static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
{
	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}

#endif /* EFX_NET_DRIVER_H */