/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

/* Common definitions for all Efx net driver code */

#ifndef EFX_NET_DRIVER_H
#define EFX_NET_DRIVER_H

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/timer.h>
#include <linux/mdio.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/i2c.h>
#include <linux/mtd/mtd.h>
#include <net/busy_poll.h>

#include "enum.h"
#include "bitfield.h"
#include "filter.h"

/**************************************************************************
 *
 * Build definitions
 *
 **************************************************************************/

#define EFX_DRIVER_VERSION	"4.0"

#ifdef DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
#else
#define EFX_BUG_ON_PARANOID(x) do {} while (0)
#define EFX_WARN_ON_PARANOID(x) do {} while (0)
#endif

/**************************************************************************
 *
 * Efx data structures
 *
 **************************************************************************/

#define EFX_MAX_CHANNELS 32U
#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
#define EFX_EXTRA_CHANNEL_IOV	0
#define EFX_EXTRA_CHANNEL_PTP	1
#define EFX_MAX_EXTRA_CHANNELS	2U

/* Checksum generation is a per-queue option in hardware, so each
 * queue visible to the networking core is backed by two hardware TX
 * queues. */
#define EFX_MAX_TX_TC		2
#define EFX_MAX_CORE_TX_QUEUES	(EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
#define EFX_TXQ_TYPE_OFFLOAD	1	/* flag */
#define EFX_TXQ_TYPE_HIGHPRI	2	/* flag */
#define EFX_TXQ_TYPES		4
#define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
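
/* Illustrative sketch, not part of the driver API: assuming the
 * numbering queue == channel * EFX_TXQ_TYPES + type, a TX queue number
 * decomposes into a channel index and the type flags above.  The
 * efx_example_* names are hypothetical.
 */
static inline unsigned int efx_example_txq_type(unsigned int queue)
{
	return queue & (EFX_TXQ_TYPES - 1);	/* OFFLOAD and/or HIGHPRI */
}

static inline unsigned int efx_example_txq_channel(unsigned int queue)
{
	return queue / EFX_TXQ_TYPES;		/* owning channel index */
}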

/* Maximum possible MTU the driver supports */
#define EFX_MAX_MTU (9 * 1024)

/* Size of an RX scatter buffer.  Small enough to pack 2 into a 4K page,
 * and should be a multiple of the cache line size.
 */
#define EFX_RX_USR_BUF_SIZE	(2048 - 256)

/* If possible, we should ensure cache line alignment at start and end
 * of every buffer.  Otherwise, we just need to ensure 4-byte
 * alignment of the network header.
 */
#if NET_IP_ALIGN == 0
#define EFX_RX_BUF_ALIGNMENT	L1_CACHE_BYTES
#else
#define EFX_RX_BUF_ALIGNMENT	4
#endif

/* Forward declare Precision Time Protocol (PTP) support structure. */
struct efx_ptp_data;
struct hwtstamp_config;

struct efx_self_tests;

/**
 * struct efx_buffer - A general-purpose DMA buffer
 * @addr: host base address of the buffer
 * @dma_addr: DMA base address of the buffer
 * @len: Buffer length, in bytes
 *
 * The NIC uses these buffers for its interrupt status registers and
 * MAC stats dumps.
 */
struct efx_buffer {
	void *addr;
	dma_addr_t dma_addr;
	unsigned int len;
};

/**
 * struct efx_special_buffer - DMA buffer entered into buffer table
 * @buf: Standard &struct efx_buffer
 * @index: Buffer index within controller's buffer table
 * @entries: Number of buffer table entries
 *
 * The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE.
 * Event and descriptor rings are addressed via one or more buffer
 * table entries (and so can be physically non-contiguous, although we
 * currently do not take advantage of that).  On Falcon and Siena we
 * have to take care of allocating and initialising the entries
 * ourselves.  On later hardware this is managed by the firmware and
 * @index and @entries are left as 0.
 */
struct efx_special_buffer {
	struct efx_buffer buf;
	unsigned int index;
	unsigned int entries;
};

/**
 * struct efx_tx_buffer - buffer state for a TX descriptor
 * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
 *	freed when descriptor completes
 * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
 *	freed when descriptor completes.
 * @option: When @flags & %EFX_TX_BUF_OPTION, a NIC-specific option descriptor.
 * @dma_addr: DMA address of the fragment.
 * @flags: Flags for allocation and DMA mapping type
 * @len: Length of this fragment.
 *	This field is zero when the queue slot is empty.
 * @unmap_len: Length of this fragment to unmap
 * @dma_offset: Offset of @dma_addr from the address of the backing DMA mapping.
 *	Only valid if @unmap_len != 0.
 */
struct efx_tx_buffer {
	union {
		const struct sk_buff *skb;
		void *heap_buf;
	};
	union {
		efx_qword_t option;
		dma_addr_t dma_addr;
	};
	unsigned short flags;
	unsigned short len;
	unsigned short unmap_len;
	unsigned short dma_offset;
};
#define EFX_TX_BUF_CONT		1	/* not last descriptor of packet */
#define EFX_TX_BUF_SKB		2	/* buffer is last part of skb */
#define EFX_TX_BUF_HEAP		4	/* buffer was allocated with kmalloc() */
#define EFX_TX_BUF_MAP_SINGLE	8	/* buffer was mapped with dma_map_single() */
#define EFX_TX_BUF_OPTION	0x10	/* empty buffer for option descriptor */
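
/* Illustrative sketch, not the driver's completion path: how the flags
 * above would typically drive cleanup of a completed descriptor.  The
 * function name is hypothetical.
 */
static inline void efx_example_unmap_tx_buffer(struct device *dma_dev,
					       struct efx_tx_buffer *buffer)
{
	if (buffer->unmap_len) {
		/* @dma_offset gives the offset back to the start of the
		 * original mapping (see the kernel-doc above). */
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr,
					 buffer->unmap_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr,
				       buffer->unmap_len, DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}
}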

/**
 * struct efx_tx_queue - An Efx TX queue
 *
 * This is a ring buffer of TX fragments.
 * Since the TX completion path always executes on the same
 * CPU and the xmit path can operate on different CPUs,
 * performance is increased by ensuring that the completion
 * path and the xmit path operate on different cache lines.
 * This is particularly important if the xmit path is always
 * executing on one CPU which is different from the completion
 * path.  There is also a cache line for members which are
 * read but not written on the fast path.
 *
 * @efx: The associated Efx NIC
 * @queue: DMA queue number
 * @channel: The associated channel
 * @core_txq: The networking core TX queue structure
 * @buffer: The software buffer ring
 * @tsoh_page: Array of pages of TSO header buffers
 * @txd: The hardware descriptor ring
 * @ptr_mask: The size of the ring minus 1.
 * @piobuf: PIO buffer region for this TX queue (shared with its partner).
 *	Size of the region is efx_piobuf_size.
 * @piobuf_offset: Buffer offset to be specified in PIO descriptors
 * @initialised: Has hardware queue been initialised?
 * @read_count: Current read pointer.
 *	This is the number of buffers that have been removed from both rings.
 * @old_write_count: The value of @write_count when last checked.
 *	This is here for performance reasons.  The xmit path will
 *	only get the up-to-date value of @write_count if this
 *	variable indicates that the queue is empty.  This is to
 *	avoid cache-line ping-pong between the xmit path and the
 *	completion path.
 * @merge_events: Number of TX merged completion events
 * @insert_count: Current insert pointer
 *	This is the number of buffers that have been added to the
 *	software ring.
 * @write_count: Current write pointer
 *	This is the number of buffers that have been added to the
 *	hardware ring.
 * @old_read_count: The value of read_count when last checked.
 *	This is here for performance reasons.  The xmit path will
 *	only get the up-to-date value of read_count if this
 *	variable indicates that the queue is full.  This is to
 *	avoid cache-line ping-pong between the xmit path and the
 *	completion path.
 * @tso_bursts: Number of times TSO xmit invoked by kernel
 * @tso_long_headers: Number of packets with headers too long for standard
 *	blocks
 * @tso_packets: Number of packets via the TSO xmit path
 * @pushes: Number of times the TX push feature has been used
 * @pio_packets: Number of times the TX PIO feature has been used
 * @empty_read_count: If the completion path has seen the queue as empty
 *	and the transmission path has not yet checked this, the value of
 *	@read_count bitwise-ORed with %EFX_EMPTY_COUNT_VALID; otherwise 0.
 */
struct efx_tx_queue {
	/* Members which don't change on the fast path */
	struct efx_nic *efx ____cacheline_aligned_in_smp;
	unsigned queue;
	struct efx_channel *channel;
	struct netdev_queue *core_txq;
	struct efx_tx_buffer *buffer;
	struct efx_buffer *tsoh_page;
	struct efx_special_buffer txd;
	unsigned int ptr_mask;
	void __iomem *piobuf;
	unsigned int piobuf_offset;
	bool initialised;

	/* Members used mainly on the completion path */
	unsigned int read_count ____cacheline_aligned_in_smp;
	unsigned int old_write_count;
	unsigned int merge_events;

	/* Members used only on the xmit path */
	unsigned int insert_count ____cacheline_aligned_in_smp;
	unsigned int write_count;
	unsigned int old_read_count;
	unsigned int tso_bursts;
	unsigned int tso_long_headers;
	unsigned int tso_packets;
	unsigned int pushes;
	unsigned int pio_packets;
	/* Statistics to supplement MAC stats */
	unsigned long tx_packets;

	/* Members shared between paths and sometimes updated */
	unsigned int empty_read_count ____cacheline_aligned_in_smp;
#define EFX_EMPTY_COUNT_VALID 0x80000000
	atomic_t flush_outstanding;
};
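
/* Illustrative sketch, not the driver code: the @empty_read_count
 * handshake described in the kernel-doc above.  The completion path
 * publishes its read pointer with %EFX_EMPTY_COUNT_VALID ORed in when
 * it sees the queue go empty, so the xmit path only reads @read_count
 * (and bounces that cache line) at most once per emptying.  The
 * function name is hypothetical.
 */
static inline void efx_example_note_tx_queue_empty(struct efx_tx_queue *txq)
{
	smp_mb();	/* order descriptor reads before publishing */
	ACCESS_ONCE(txq->empty_read_count) =
		txq->read_count | EFX_EMPTY_COUNT_VALID;
}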

/**
 * struct efx_rx_buffer - An Efx RX data buffer
 * @dma_addr: DMA base address of the buffer
 * @page: The associated page buffer.
 *	Will be %NULL if the buffer slot is currently free.
 * @page_offset: If pending: offset in @page of DMA base address.
 *	If completed: offset in @page of Ethernet header.
 * @len: If pending: length for DMA descriptor.
 *	If completed: received length, excluding hash prefix.
 * @flags: Flags for buffer and packet state.  These are only set on the
 *	first buffer of a scattered packet.
 */
struct efx_rx_buffer {
	dma_addr_t dma_addr;
	struct page *page;
	u16 page_offset;
	u16 len;
	u16 flags;
};
#define EFX_RX_BUF_LAST_IN_PAGE	0x0001
#define EFX_RX_PKT_CSUMMED	0x0002
#define EFX_RX_PKT_DISCARD	0x0004
#define EFX_RX_PKT_TCP		0x0040
#define EFX_RX_PKT_PREFIX_LEN	0x0080	/* length is in prefix only */

/**
 * struct efx_rx_page_state - Page-based rx buffer state
 *
 * Inserted at the start of every page allocated for receive buffers.
 * Used to facilitate sharing dma mappings between recycled rx buffers
 * and those passed up to the kernel.
 *
 * @dma_addr: The dma address of this page.
 */
struct efx_rx_page_state {
	dma_addr_t dma_addr;

	unsigned int __pad[0] ____cacheline_aligned;
};

/**
 * struct efx_rx_queue - An Efx RX queue
 * @efx: The associated Efx NIC
 * @core_index:  Index of network core RX queue.  Will be >= 0 iff this
 *	is associated with a real RX queue.
 * @buffer: The software buffer ring
 * @rxd: The hardware descriptor ring
 * @ptr_mask: The size of the ring minus 1.
 * @refill_enabled: Enable refill whenever fill level is low
 * @flush_pending: Set when an RX flush is pending. Has the same lifetime as
 *	@rxq_flush_pending.
 * @added_count: Number of buffers added to the receive queue.
 * @notified_count: Number of buffers given to NIC (<= @added_count).
 * @removed_count: Number of buffers removed from the receive queue.
 * @scatter_n: Used by NIC specific receive code.
 * @scatter_len: Used by NIC specific receive code.
 * @page_ring: The ring to store DMA mapped pages for reuse.
 * @page_add: Counter to calculate the write pointer for the recycle ring.
 * @page_remove: Counter to calculate the read pointer for the recycle ring.
 * @page_recycle_count: The number of pages that have been recycled.
 * @page_recycle_failed: The number of pages that couldn't be recycled because
 *      the kernel still held a reference to them.
 * @page_recycle_full: The number of pages that were released because the
 *      recycle ring was full.
 * @page_ptr_mask: The number of pages in the RX recycle ring minus 1.
 * @max_fill: RX descriptor maximum fill level (<= ring size)
 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
 *	(<= @max_fill)
 * @min_fill: RX descriptor minimum non-zero fill level.
 *	This records the minimum fill level observed when a ring
 *	refill was triggered.
 * @recycle_count: RX buffer recycle counter.
 * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
 */
struct efx_rx_queue {
	struct efx_nic *efx;
	int core_index;
	struct efx_rx_buffer *buffer;
	struct efx_special_buffer rxd;
	unsigned int ptr_mask;
	bool refill_enabled;
	bool flush_pending;

	unsigned int added_count;
	unsigned int notified_count;
	unsigned int removed_count;
	unsigned int scatter_n;
	unsigned int scatter_len;
	struct page **page_ring;
	unsigned int page_add;
	unsigned int page_remove;
	unsigned int page_recycle_count;
	unsigned int page_recycle_failed;
	unsigned int page_recycle_full;
	unsigned int page_ptr_mask;
	unsigned int max_fill;
	unsigned int fast_fill_trigger;
	unsigned int min_fill;
	unsigned int min_overfill;
	unsigned int recycle_count;
	struct timer_list slow_fill;
	unsigned int slow_fill_count;
	/* Statistics to supplement MAC stats */
	unsigned long rx_packets;
};
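
/* Illustrative sketch, not the driver code: the page recycle ring
 * declared above is indexed like the descriptor rings, with
 * free-running counters masked by @page_ptr_mask.  The function name
 * is hypothetical.
 */
static inline struct page *
efx_example_recycle_ring_remove(struct efx_rx_queue *rx_queue)
{
	struct page *page;

	if (rx_queue->page_remove == rx_queue->page_add)
		return NULL;		/* ring is empty */
	page = rx_queue->page_ring[rx_queue->page_remove &
				   rx_queue->page_ptr_mask];
	rx_queue->page_remove++;
	return page;
}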

enum efx_sync_events_state {
	SYNC_EVENTS_DISABLED = 0,
	SYNC_EVENTS_QUIESCENT,
	SYNC_EVENTS_REQUESTED,
	SYNC_EVENTS_VALID,
};

/**
 * struct efx_channel - An Efx channel
 *
 * A channel comprises an event queue, at least one TX queue, at least
 * one RX queue, and an associated tasklet for processing the event
 * queue.
 *
 * @efx: Associated Efx NIC
 * @channel: Channel instance number
 * @type: Channel type definition
 * @eventq_init: Event queue initialised flag
 * @enabled: Channel enabled indicator
 * @irq: IRQ number (MSI and MSI-X only)
 * @irq_moderation: IRQ moderation value (in hardware ticks)
 * @napi_dev: Net device used with NAPI
 * @napi_str: NAPI control structure
 * @state: state for NAPI vs busy polling
 * @state_lock: lock protecting @state
 * @eventq: Event queue buffer
 * @eventq_mask: Event queue pointer mask
 * @eventq_read_ptr: Event queue read pointer
 * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
 * @irq_count: Number of IRQs since last adaptive moderation decision
 * @irq_mod_score: IRQ moderation score
 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
 * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
 * @n_rx_mcast_mismatch: Count of unmatched multicast frames
 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
 * @n_rx_overlength: Count of RX_OVERLENGTH errors
 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
 * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
 *	lack of descriptors
 * @n_rx_merge_events: Number of RX merged completion events
 * @n_rx_merge_packets: Number of RX packets completed by merged events
 * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
 *	__efx_rx_packet(), or zero if there is none
 * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
 *	by __efx_rx_packet(), if @rx_pkt_n_frags != 0
 * @rx_queue: RX queue for this channel
 * @tx_queue: TX queues for this channel
 * @sync_events_state: Current state of sync events on this channel
 * @sync_timestamp_major: Major part of the last ptp sync event
 * @sync_timestamp_minor: Minor part of the last ptp sync event
 */
struct efx_channel {
	struct efx_nic *efx;
	int channel;
	const struct efx_channel_type *type;
	bool eventq_init;
	bool enabled;
	int irq;
	unsigned int irq_moderation;
	struct net_device *napi_dev;
	struct napi_struct napi_str;
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int state;
	spinlock_t state_lock;
#define EFX_CHANNEL_STATE_IDLE		0
#define EFX_CHANNEL_STATE_NAPI		(1 << 0)  /* NAPI owns this channel */
#define EFX_CHANNEL_STATE_POLL		(1 << 1)  /* poll owns this channel */
#define EFX_CHANNEL_STATE_DISABLED	(1 << 2)  /* channel is disabled */
#define EFX_CHANNEL_STATE_NAPI_YIELD	(1 << 3)  /* NAPI yielded this channel */
#define EFX_CHANNEL_STATE_POLL_YIELD	(1 << 4)  /* poll yielded this channel */
#define EFX_CHANNEL_OWNED \
	(EFX_CHANNEL_STATE_NAPI | EFX_CHANNEL_STATE_POLL)
#define EFX_CHANNEL_LOCKED \
	(EFX_CHANNEL_OWNED | EFX_CHANNEL_STATE_DISABLED)
#define EFX_CHANNEL_USER_PEND \
	(EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_POLL_YIELD)
#endif /* CONFIG_NET_RX_BUSY_POLL */
	struct efx_special_buffer eventq;
	unsigned int eventq_mask;
	unsigned int eventq_read_ptr;
	int event_test_cpu;

	unsigned int irq_count;
	unsigned int irq_mod_score;
#ifdef CONFIG_RFS_ACCEL
	unsigned int rfs_filters_added;
#endif

	unsigned n_rx_tobe_disc;
	unsigned n_rx_ip_hdr_chksum_err;
	unsigned n_rx_tcp_udp_chksum_err;
	unsigned n_rx_mcast_mismatch;
	unsigned n_rx_frm_trunc;
	unsigned n_rx_overlength;
	unsigned n_skbuff_leaks;
	unsigned int n_rx_nodesc_trunc;
	unsigned int n_rx_merge_events;
	unsigned int n_rx_merge_packets;

	unsigned int rx_pkt_n_frags;
	unsigned int rx_pkt_index;

	struct efx_rx_queue rx_queue;
	struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];

	enum efx_sync_events_state sync_events_state;
	u32 sync_timestamp_major;
	u32 sync_timestamp_minor;
};

#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void efx_channel_init_lock(struct efx_channel *channel)
{
	spin_lock_init(&channel->state_lock);
}

/* Called from the device poll routine to get ownership of a channel. */
static inline bool efx_channel_lock_napi(struct efx_channel *channel)
{
	bool rc = true;

	spin_lock_bh(&channel->state_lock);
	if (channel->state & EFX_CHANNEL_LOCKED) {
		WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);
		channel->state |= EFX_CHANNEL_STATE_NAPI_YIELD;
		rc = false;
	} else {
		/* we don't care if someone yielded */
		channel->state = EFX_CHANNEL_STATE_NAPI;
	}
	spin_unlock_bh(&channel->state_lock);
	return rc;
}

static inline void efx_channel_unlock_napi(struct efx_channel *channel)
{
	spin_lock_bh(&channel->state_lock);
	WARN_ON(channel->state &
		(EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_YIELD));

	channel->state &= EFX_CHANNEL_STATE_DISABLED;
	spin_unlock_bh(&channel->state_lock);
}

/* Called from efx_busy_poll(). */
static inline bool efx_channel_lock_poll(struct efx_channel *channel)
{
	bool rc = true;

	spin_lock_bh(&channel->state_lock);
	if ((channel->state & EFX_CHANNEL_LOCKED)) {
		channel->state |= EFX_CHANNEL_STATE_POLL_YIELD;
		rc = false;
	} else {
		/* preserve yield marks */
		channel->state |= EFX_CHANNEL_STATE_POLL;
	}
	spin_unlock_bh(&channel->state_lock);
	return rc;
}

/* Called from efx_busy_poll() to release the channel; resets the state
 * to idle unless the channel has been disabled. */
static inline void efx_channel_unlock_poll(struct efx_channel *channel)
{
	spin_lock_bh(&channel->state_lock);
	WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);

	/* will reset state to idle, unless channel is disabled */
	channel->state &= EFX_CHANNEL_STATE_DISABLED;
	spin_unlock_bh(&channel->state_lock);
}

/* True if a socket is polling, even if it did not get the lock. */
static inline bool efx_channel_busy_polling(struct efx_channel *channel)
{
	WARN_ON(!(channel->state & EFX_CHANNEL_OWNED));
	return channel->state & EFX_CHANNEL_USER_PEND;
}

static inline void efx_channel_enable(struct efx_channel *channel)
{
	spin_lock_bh(&channel->state_lock);
	channel->state = EFX_CHANNEL_STATE_IDLE;
	spin_unlock_bh(&channel->state_lock);
}

/* False if the channel is currently owned. */
static inline bool efx_channel_disable(struct efx_channel *channel)
{
	bool rc = true;

	spin_lock_bh(&channel->state_lock);
	if (channel->state & EFX_CHANNEL_OWNED)
		rc = false;
	channel->state |= EFX_CHANNEL_STATE_DISABLED;
	spin_unlock_bh(&channel->state_lock);

	return rc;
}

#else /* CONFIG_NET_RX_BUSY_POLL */

static inline void efx_channel_init_lock(struct efx_channel *channel)
{
}

static inline bool efx_channel_lock_napi(struct efx_channel *channel)
{
	return true;
}

static inline void efx_channel_unlock_napi(struct efx_channel *channel)
{
}

static inline bool efx_channel_lock_poll(struct efx_channel *channel)
{
	return false;
}

static inline void efx_channel_unlock_poll(struct efx_channel *channel)
{
}

static inline bool efx_channel_busy_polling(struct efx_channel *channel)
{
	return false;
}

static inline void efx_channel_enable(struct efx_channel *channel)
{
}

static inline bool efx_channel_disable(struct efx_channel *channel)
{
	return true;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
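
/* Illustrative sketch, not the driver's NAPI handler: how the locking
 * helpers above bracket event processing in a poll routine.  Returning
 * the full budget tells NAPI to poll again, which is what we want when
 * a busy-polling socket currently owns the channel.  The function name
 * is hypothetical.
 */
static inline int efx_example_napi_poll(struct efx_channel *channel,
					int budget)
{
	int spent = 0;

	if (!efx_channel_lock_napi(channel))
		return budget;		/* busy poll owns the channel */

	/* ... process up to @budget events here, counting them in
	 * @spent ... */

	efx_channel_unlock_napi(channel);
	return spent;
}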

/**
 * struct efx_msi_context - Context for each MSI
 * @efx: The associated NIC
 * @index: Index of the channel/IRQ
 * @name: Name of the channel/IRQ
 *
 * Unlike &struct efx_channel, this is never reallocated and is always
 * safe for the IRQ handler to access.
 */
struct efx_msi_context {
	struct efx_nic *efx;
	unsigned int index;
	char name[IFNAMSIZ + 6];
};

/**
 * struct efx_channel_type - distinguishes traffic and extra channels
 * @handle_no_channel: Handle failure to allocate an extra channel
 * @pre_probe: Set up extra state prior to initialisation
 * @post_remove: Tear down extra state after finalisation, if allocated.
 *	May be called on channels that have not been probed.
 * @get_name: Generate the channel's name (used for its IRQ handler)
 * @copy: Copy the channel state prior to reallocation.  May be %NULL if
 *	reallocation is not supported.
 * @receive_skb: Handle an skb ready to be passed to netif_receive_skb()
 * @keep_eventq: Flag for whether event queue should be kept initialised
 *	while the device is stopped
 */
struct efx_channel_type {
	void (*handle_no_channel)(struct efx_nic *);
	int (*pre_probe)(struct efx_channel *);
	void (*post_remove)(struct efx_channel *);
	void (*get_name)(struct efx_channel *, char *buf, size_t len);
	struct efx_channel *(*copy)(const struct efx_channel *);
	bool (*receive_skb)(struct efx_channel *, struct sk_buff *);
	bool keep_eventq;
};

enum efx_led_mode {
	EFX_LED_OFF	= 0,
	EFX_LED_ON	= 1,
	EFX_LED_DEFAULT	= 2
};

#define STRING_TABLE_LOOKUP(val, member) \
	((val) < member ## _max) ? member ## _names[val] : "(invalid)"

extern const char *const efx_loopback_mode_names[];
extern const unsigned int efx_loopback_mode_max;
#define LOOPBACK_MODE(efx) \
	STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)

extern const char *const efx_reset_type_names[];
extern const unsigned int efx_reset_type_max;
#define RESET_TYPE(type) \
	STRING_TABLE_LOOKUP(type, efx_reset_type)
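
/* Illustrative sketch: STRING_TABLE_LOOKUP() relies only on the naming
 * convention of a <prefix>_names[] array paired with a <prefix>_max
 * bound, as with the loopback and reset tables above, e.g.
 * (hypothetical):
 *
 *	static const char *const efx_example_names[] = { "foo", "bar" };
 *	static const unsigned int efx_example_max =
 *		ARRAY_SIZE(efx_example_names);
 *
 *	name = STRING_TABLE_LOOKUP(val, efx_example);
 */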

enum efx_int_mode {
	/* Be careful if altering to correct macro below */
	EFX_INT_MODE_MSIX = 0,
	EFX_INT_MODE_MSI = 1,
	EFX_INT_MODE_LEGACY = 2,
	EFX_INT_MODE_MAX	/* Insert any new items before this */
};
#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)

enum nic_state {
	STATE_UNINIT = 0,	/* device being probed/removed or is frozen */
	STATE_READY = 1,	/* hardware ready and netdev registered */
	STATE_DISABLED = 2,	/* device disabled due to hardware errors */
	STATE_RECOVERY = 3,	/* device recovering from PCI error */
};

/* Forward declaration */
struct efx_nic;

/* Pseudo bit-mask flow control field */
#define EFX_FC_RX	FLOW_CTRL_RX
#define EFX_FC_TX	FLOW_CTRL_TX
#define EFX_FC_AUTO	4

/**
 * struct efx_link_state - Current state of the link
 * @up: Link is up
 * @fd: Link is full-duplex
 * @fc: Actual flow control flags
 * @speed: Link speed (Mbps)
 */
struct efx_link_state {
	bool up;
	bool fd;
	u8 fc;
	unsigned int speed;
};

static inline bool efx_link_state_equal(const struct efx_link_state *left,
					const struct efx_link_state *right)
{
	return left->up == right->up && left->fd == right->fd &&
		left->fc == right->fc && left->speed == right->speed;
}

/**
 * struct efx_phy_operations - Efx PHY operations table
 * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds,
 *	efx->loopback_modes.
 * @init: Initialise PHY
 * @fini: Shut down PHY
 * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
 * @poll: Update @link_state and report whether it changed.
 *	Serialised by the mac_lock.
 * @get_settings: Get ethtool settings. Serialised by the mac_lock.
 * @set_settings: Set ethtool settings. Serialised by the mac_lock.
 * @set_npage_adv: Set abilities advertised in (Extended) Next Page
 *	(only needed where AN bit is set in mmds)
 * @test_alive: Test that PHY is 'alive' (online)
 * @test_name: Get the name of a PHY-specific test/result
 * @run_tests: Run tests and record results as appropriate (offline).
 *	Flags are the ethtool tests flags.
 */
struct efx_phy_operations {
	int (*probe) (struct efx_nic *efx);
	int (*init) (struct efx_nic *efx);
	void (*fini) (struct efx_nic *efx);
	void (*remove) (struct efx_nic *efx);
	int (*reconfigure) (struct efx_nic *efx);
	bool (*poll) (struct efx_nic *efx);
	void (*get_settings) (struct efx_nic *efx,
			      struct ethtool_cmd *ecmd);
	int (*set_settings) (struct efx_nic *efx,
			     struct ethtool_cmd *ecmd);
	void (*set_npage_adv) (struct efx_nic *efx, u32);
	int (*test_alive) (struct efx_nic *efx);
	const char *(*test_name) (struct efx_nic *efx, unsigned int index);
	int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
	int (*get_module_eeprom) (struct efx_nic *efx,
			       struct ethtool_eeprom *ee,
			       u8 *data);
	int (*get_module_info) (struct efx_nic *efx,
				struct ethtool_modinfo *modinfo);
};

/**
 * enum efx_phy_mode - PHY operating mode flags
 * @PHY_MODE_NORMAL: on and should pass traffic
 * @PHY_MODE_TX_DISABLED: on with TX disabled
 * @PHY_MODE_LOW_POWER: set to low power through MDIO
 * @PHY_MODE_OFF: switched off through external control
 * @PHY_MODE_SPECIAL: on but will not pass traffic
 */
enum efx_phy_mode {
	PHY_MODE_NORMAL		= 0,
	PHY_MODE_TX_DISABLED	= 1,
	PHY_MODE_LOW_POWER	= 2,
	PHY_MODE_OFF		= 4,
	PHY_MODE_SPECIAL	= 8,
};

static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
{
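	/* Any mode flag other than TX-disable (i.e. low power, off or
	 * special) means the PHY is not passing traffic. */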
	return !!(mode & ~PHY_MODE_TX_DISABLED);
}

/**
 * struct efx_hw_stat_desc - Description of a hardware statistic
 * @name: Name of the statistic as visible through ethtool, or %NULL if
 *	it should not be exposed
 * @dma_width: Width in bits (0 for non-DMA statistics)
 * @offset: Offset within stats (ignored for non-DMA statistics)
 */
struct efx_hw_stat_desc {
	const char *name;
	u16 dma_width;
	u16 offset;
};

/* Number of bits used in a multicast filter hash address */
#define EFX_MCAST_HASH_BITS 8

/* Number of (single-bit) entries in a multicast filter hash */
#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)

/* An Efx multicast filter hash */
union efx_multicast_hash {
	u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
	efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};

struct vfdi_status;

/**
 * struct efx_nic - an Efx NIC
 * @name: Device name (net device name or bus id before net device registered)
 * @pci_dev: The PCI device
 * @node: List node for maintaining primary/secondary function lists
 * @primary: &struct efx_nic instance for the primary function of this
 *	controller.  May be the same structure, and may be %NULL if no
 *	primary function is bound.  Serialised by rtnl_lock.
 * @secondary_list: List of &struct efx_nic instances for the secondary PCI
 *	functions of the controller, if this is for the primary function.
 *	Serialised by rtnl_lock.
 * @type: Controller type attributes
 * @legacy_irq: IRQ number
 * @workqueue: Workqueue for port reconfigures and the HW monitor.
 *	Work items do not hold and must not acquire RTNL.
 * @workqueue_name: Name of workqueue
 * @reset_work: Scheduled reset workitem
 * @membase_phys: Memory BAR value as physical address
 * @membase: Memory BAR value
 * @interrupt_mode: Interrupt mode
 * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
 * @irq_rx_moderation: IRQ moderation time for RX event queues
 * @msg_enable: Log message enable flags
 * @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
 * @reset_pending: Bitmask for pending resets
 * @tx_queue: TX DMA queues
 * @rx_queue: RX DMA queues
 * @channel: Channels
 * @msi_context: Context for each MSI
 * @extra_channel_types: Types of extra (non-traffic) channels that
 *	should be allocated for this NIC
 * @rxq_entries: Size of receive queues requested by user.
 * @txq_entries: Size of transmit queues requested by user.
 * @txq_stop_thresh: TX queue fill level at or above which we stop it.
 * @txq_wake_thresh: TX queue fill level at or below which we wake it.
 * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
 * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
 * @sram_lim_qw: Qword address limit of SRAM
 * @next_buffer_table: First available buffer table id
 * @n_channels: Number of channels in use
 * @n_rx_channels: Number of channels used for RX (= number of RX queues)
 * @n_tx_channels: Number of channels used for TX
 * @rx_ip_align: RX DMA address offset to have IP header aligned in
 *	accordance with NET_IP_ALIGN
 * @rx_dma_len: Current maximum RX DMA length
 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
 * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
 *	for use in sk_buff::truesize
 * @rx_prefix_size: Size of RX prefix before packet data
 * @rx_packet_hash_offset: Offset of RX flow hash from start of packet data
 *	(valid only if @rx_prefix_size != 0; always negative)
 * @rx_packet_len_offset: Offset of RX packet length from start of packet data
 *	(valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative)
 * @rx_packet_ts_offset: Offset of timestamp from start of packet data
 *	(valid only if channel->sync_timestamps_enabled; always negative)
 * @rx_hash_key: Toeplitz hash key for RSS
 * @rx_indir_table: Indirection table for RSS
 * @rx_scatter: Scatter mode enabled for receives
 * @int_error_count: Number of internal errors seen recently
 * @int_error_expire: Time at which error count will be expired
 * @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will
 *	acknowledge but do nothing else.
 * @irq_status: Interrupt status buffer
 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
 * @irq_level: IRQ level/index for IRQs not triggered by an event queue
 * @selftest_work: Work item for asynchronous self-test
 * @mtd_list: List of MTDs attached to the NIC
 * @nic_data: Hardware dependent state
 * @mcdi: Management-Controller-to-Driver Interface state
 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
 *	efx_monitor() and efx_reconfigure_port()
 * @port_enabled: Port enabled indicator.
 *	Serialises efx_stop_all(), efx_start_all(), efx_monitor() and
 *	efx_mac_work() with kernel interfaces. Safe to read under any
 *	one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
 *	be held to modify it.
 * @port_initialized: Port initialized?
 * @net_dev: Operating system network device. Consider holding the rtnl lock
 * @stats_buffer: DMA buffer for statistics
 * @phy_type: PHY type
 * @phy_op: PHY interface
 * @phy_data: PHY private data (including PHY-specific stats)
 * @mdio: PHY MDIO interface
 * @mdio_bus: PHY MDIO bus ID (only used by Siena)
 * @phy_mode: PHY operating mode. Serialised by @mac_lock.
 * @link_advertising: Autonegotiation advertising flags
 * @link_state: Current state of the link
 * @n_link_state_changes: Number of times the link has changed state
 * @unicast_filter: Flag for Falcon-arch simple unicast filter.
 *	Protected by @mac_lock.
 * @multicast_hash: Multicast hash table for Falcon-arch.
 *	Protected by @mac_lock.
 * @wanted_fc: Wanted flow control flags
 * @fc_disable: When non-zero flow control is disabled. Typically used to
 *	ensure that network back pressure doesn't delay dma queue flushes.
 *	Serialised by the rtnl lock.
 * @mac_work: Work item for changing MAC promiscuity and multicast hash
 * @loopback_mode: Loopback status
 * @loopback_modes: Supported loopback mode bitmask
 * @loopback_selftest: Offline self-test private state
 * @filter_lock: Filter table lock
 * @filter_state: Architecture-dependent filter table state
 * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
 *	indexed by filter ID
 * @rps_expire_index: Next index to check for expiry in @rps_flow_id
 * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
 * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
 *	Decremented when the efx_flush_rx_queue() is called.
 * @rxq_flush_outstanding: Count of number of RX flushes started but not yet
 *	completed (either success or failure). Not used when MCDI is used to
 *	flush receive queues.
 * @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions.
 * @vf_count: Number of VFs intended to be enabled.
 * @vf_init_count: Number of VFs that have been fully initialised.
 * @vi_scale: log2 number of vnics per VF.
 * @ptp_data: PTP state data
 * @vpd_sn: Serial number read from VPD
 * @monitor_work: Hardware monitor workitem
 * @biu_lock: BIU (bus interface unit) lock
 * @last_irq_cpu: Last CPU to handle a possible test interrupt.  This
 *	field is used by efx_test_interrupts() to verify that an
 *	interrupt has occurred.
 * @stats_lock: Statistics update lock. Must be held when calling
 *	efx_nic_type::{update,start,stop}_stats.
 * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb
 *
 * This is stored in the private area of the &struct net_device.
 */
struct efx_nic {
	/* The following fields should be written very rarely */

	char name[IFNAMSIZ];
	struct list_head node;
	struct efx_nic *primary;
	struct list_head secondary_list;
	struct pci_dev *pci_dev;
	unsigned int port_num;
	const struct efx_nic_type *type;
	int legacy_irq;
	bool eeh_disabled_legacy_irq;
	struct workqueue_struct *workqueue;
	char workqueue_name[16];
	struct work_struct reset_work;
	resource_size_t membase_phys;
	void __iomem *membase;

	enum efx_int_mode interrupt_mode;
	unsigned int timer_quantum_ns;
	bool irq_rx_adaptive;
	unsigned int irq_rx_moderation;
	u32 msg_enable;

	enum nic_state state;
	unsigned long reset_pending;

	struct efx_channel *channel[EFX_MAX_CHANNELS];
	struct efx_msi_context msi_context[EFX_MAX_CHANNELS];
	const struct efx_channel_type *
	extra_channel_type[EFX_MAX_EXTRA_CHANNELS];

	unsigned rxq_entries;
	unsigned txq_entries;
	unsigned int txq_stop_thresh;
	unsigned int txq_wake_thresh;

	unsigned tx_dc_base;
	unsigned rx_dc_base;
	unsigned sram_lim_qw;
	unsigned next_buffer_table;

	unsigned int max_channels;
	unsigned n_channels;
	unsigned n_rx_channels;
	unsigned rss_spread;
	unsigned tx_channel_offset;
	unsigned n_tx_channels;
	unsigned int rx_ip_align;
	unsigned int rx_dma_len;
	unsigned int rx_buffer_order;
	unsigned int rx_buffer_truesize;
	unsigned int rx_page_buf_step;
	unsigned int rx_bufs_per_page;
	unsigned int rx_pages_per_batch;
	unsigned int rx_prefix_size;
	int rx_packet_hash_offset;
	int rx_packet_len_offset;
	int rx_packet_ts_offset;
	u8 rx_hash_key[40];
	u32 rx_indir_table[128];
	bool rx_scatter;

	unsigned int_error_count;
	unsigned long int_error_expire;

	bool irq_soft_enabled;
	struct efx_buffer irq_status;
	unsigned irq_zero_count;
	unsigned irq_level;
	struct delayed_work selftest_work;

#ifdef CONFIG_SFC_MTD
	struct list_head mtd_list;
#endif

	void *nic_data;
	struct efx_mcdi_data *mcdi;

	struct mutex mac_lock;
	struct work_struct mac_work;
	bool port_enabled;

	bool mc_bist_for_other_fn;
	bool port_initialized;
	struct net_device *net_dev;

	struct efx_buffer stats_buffer;
	u64 rx_nodesc_drops_total;
	u64 rx_nodesc_drops_while_down;
	bool rx_nodesc_drops_prev_state;

	unsigned int phy_type;
	const struct efx_phy_operations *phy_op;
	void *phy_data;
	struct mdio_if_info mdio;
	unsigned int mdio_bus;
	enum efx_phy_mode phy_mode;

	u32 link_advertising;
	struct efx_link_state link_state;
	unsigned int n_link_state_changes;

	bool unicast_filter;
	union efx_multicast_hash multicast_hash;
	u8 wanted_fc;
	unsigned fc_disable;

	atomic_t rx_reset;
	enum efx_loopback_mode loopback_mode;
	u64 loopback_modes;

	void *loopback_selftest;

	spinlock_t filter_lock;
	void *filter_state;
#ifdef CONFIG_RFS_ACCEL
	u32 *rps_flow_id;
	unsigned int rps_expire_index;
#endif

	atomic_t active_queues;
	atomic_t rxq_flush_pending;
	atomic_t rxq_flush_outstanding;
	wait_queue_head_t flush_wq;

#ifdef CONFIG_SFC_SRIOV
	unsigned vf_count;
	unsigned vf_init_count;
	unsigned vi_scale;
#endif

	struct efx_ptp_data *ptp_data;

	char *vpd_sn;

	/* The following fields may be written more often */

	struct delayed_work monitor_work ____cacheline_aligned_in_smp;
	spinlock_t biu_lock;
	int last_irq_cpu;
	spinlock_t stats_lock;
	atomic_t n_rx_noskb_drops;
};

static inline int efx_dev_registered(struct efx_nic *efx)
{
	return efx->net_dev->reg_state == NETREG_REGISTERED;
}

static inline unsigned int efx_port_num(struct efx_nic *efx)
{
	return efx->port_num;
}

struct efx_mtd_partition {
	struct list_head node;
	struct mtd_info mtd;
	const char *dev_type_name;
	const char *type_name;
	char name[IFNAMSIZ + 20];
};

/**
 * struct efx_nic_type - Efx device type definition
1092
 * @mem_bar: Get the memory BAR
1093
 * @mem_map_size: Get memory BAR mapped size
1094 1095 1096
 * @probe: Probe the controller
 * @remove: Free resources allocated by probe()
 * @init: Initialise the controller
1097 1098
 * @dimension_resources: Dimension controller resources (buffer table,
 *	and VIs once the available interrupt resources are clear)
1099 1100
 * @fini: Shut down the controller
 * @monitor: Periodic function for polling link state and hardware monitor
1101 1102
 * @map_reset_reason: Map ethtool reset reason to a reset method
 * @map_reset_flags: Map ethtool reset flags to a reset method, if possible
1103 1104 1105 1106
 * @reset: Reset the controller hardware and possibly the PHY.  This will
 *	be called while the controller is uninitialised.
 * @probe_port: Probe the MAC and PHY
 * @remove_port: Free resources allocated by probe_port()
1107
 * @handle_global_event: Handle a "global" event (may be %NULL)
1108
 * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues)
1109
 * @prepare_flush: Prepare the hardware for flushing the DMA queues
1110 1111 1112
 *	(for Falcon architecture)
 * @finish_flush: Clean up after flushing the DMA queues (for Falcon
 *	architecture)
 * @prepare_flr: Prepare for an FLR
 * @finish_flr: Clean up after an FLR
 * @describe_stats: Describe statistics for ethtool
 * @update_stats: Update statistics not provided by event handling.
 *	Either argument may be %NULL.
 * @start_stats: Start the regular fetching of statistics
 * @pull_stats: Pull stats from the NIC and wait until they arrive.
 * @stop_stats: Stop the regular fetching of statistics
 * @set_id_led: Set state of identifying LED or revert to automatic function
 * @push_irq_moderation: Apply interrupt moderation value
 * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
 * @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL)
 * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings
 *	to the hardware.  Serialised by the mac_lock.
 * @check_mac_fault: Check MAC fault state. True if fault present.
 * @get_wol: Get WoL configuration from driver state
 * @set_wol: Push WoL configuration to the NIC
 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
 * @test_chip: Test registers.  May use efx_farch_test_registers(), and is
 *	expected to reset the NIC.
 * @test_nvram: Test validity of NVRAM contents
 * @mcdi_request: Send an MCDI request with the given header and SDU.
 *	The SDU length may be any value from 0 up to the protocol-
 *	defined maximum, but its buffer will be padded to a multiple
 *	of 4 bytes.
 * @mcdi_poll_response: Test whether an MCDI response is available.
 * @mcdi_read_response: Read the MCDI response PDU.  The offset will
 *	be a multiple of 4.  The length may not be, but the buffer
 *	will be padded so it is safe to round up.
 * @mcdi_poll_reboot: Test whether the MCDI has rebooted.  If so,
 *	return an appropriate error code for aborting any current
 *	request; otherwise return 0.
 * @irq_enable_master: Enable IRQs on the NIC.  Each event queue must
 *	be separately enabled after this.
 * @irq_test_generate: Generate a test IRQ
 * @irq_disable_non_ev: Disable non-event IRQs on the NIC.  Each event
 *	queue must be separately disabled before this.
 * @irq_handle_msi: Handle MSI for a channel.  The @dev_id argument is
 *	a pointer to the &struct efx_msi_context for the channel.
 * @irq_handle_legacy: Handle legacy interrupt.  The @dev_id argument
 *	is a pointer to the &struct efx_nic.
 * @tx_probe: Allocate resources for TX queue
 * @tx_init: Initialise TX queue on the NIC
 * @tx_remove: Free resources for TX queue
 * @tx_write: Write TX descriptors and doorbell
 * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC
 * @rx_probe: Allocate resources for RX queue
 * @rx_init: Initialise RX queue on the NIC
 * @rx_remove: Free resources for RX queue
 * @rx_write: Write RX descriptors and doorbell
 * @rx_defer_refill: Generate a refill reminder event
 * @ev_probe: Allocate resources for event queue
 * @ev_init: Initialise event queue on the NIC
 * @ev_fini: Deinitialise event queue on the NIC
 * @ev_remove: Free resources for event queue
 * @ev_process: Process events for a queue, up to the given NAPI quota
 * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ
 * @ev_test_generate: Generate a test event
 * @filter_table_probe: Probe filter capabilities and set up filter software state
 * @filter_table_restore: Restore filters removed from hardware
 * @filter_table_remove: Remove filters from hardware and tear down software state
 * @filter_update_rx_scatter: Update filters after change to rx scatter setting
 * @filter_insert: add or replace a filter
 * @filter_remove_safe: remove a filter by ID, carefully
 * @filter_get_safe: retrieve a filter by ID, carefully
 * @filter_clear_rx: Remove all RX filters whose priority is less than or
 *	equal to the given priority and is not %EFX_FILTER_PRI_AUTO
 * @filter_count_rx_used: Get the number of filters in use at a given priority
 * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
 * @filter_get_rx_ids: Get list of RX filters at a given priority
 * @filter_rfs_insert: Add or replace a filter for RFS.  This must be
 *	atomic.  The hardware change may be asynchronous but should
 *	not be delayed for long.  It may fail if this can't be done
 *	atomically.
 * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS.
 *	This must check whether the specified table entry is used by RFS
 *	and that rps_may_expire_flow() returns true for it.
 * @mtd_probe: Probe and add MTD partitions associated with this net device,
 *	 using efx_mtd_add()
 * @mtd_rename: Set an MTD partition name using the net device name
 * @mtd_read: Read from an MTD partition
 * @mtd_erase: Erase part of an MTD partition
 * @mtd_write: Write to an MTD partition
 * @mtd_sync: Wait for write-back to complete on MTD partition.  This
 *	also notifies the driver that a writer has finished using this
 *	partition.
 * @ptp_write_host_time: Send host time to MC as part of sync protocol
 * @ptp_set_ts_sync_events: Enable or disable sync events for inline RX
 *	timestamping, possibly only temporarily for the purposes of a reset.
 * @ptp_set_ts_config: Set hardware timestamp configuration.  The flags
 *	and tx_type will already have been validated but this operation
 *	must validate and update rx_filter.
 * @revision: Hardware architecture revision
 * @txd_ptr_tbl_base: TX descriptor ring base address
 * @rxd_ptr_tbl_base: RX descriptor ring base address
 * @buf_tbl_base: Buffer table base address
 * @evq_ptr_tbl_base: Event queue pointer table base address
 * @evq_rptr_tbl_base: Event queue read-pointer table base address
 * @max_dma_mask: Maximum possible DMA mask
 * @rx_prefix_size: Size of RX prefix before packet data
 * @rx_hash_offset: Offset of RX flow hash within prefix
 * @rx_ts_offset: Offset of timestamp within prefix
 * @rx_buffer_padding: Size of padding at end of RX packet
 * @can_rx_scatter: NIC is able to scatter packets to multiple buffers
 * @always_rx_scatter: NIC will always scatter packets to multiple buffers
 * @max_interrupt_mode: Highest capability interrupt mode supported
 *	from &enum efx_init_mode.
 * @timer_period_max: Maximum period of interrupt timer (in ticks)
 * @offload_features: net_device feature flags for protocol offload
 *	features implemented in hardware
 * @mcdi_max_ver: Maximum MCDI version supported
 * @hwtstamp_filters: Mask of hardware timestamp filter types supported
 */
struct efx_nic_type {
	unsigned int mem_bar;
	unsigned int (*mem_map_size)(struct efx_nic *efx);
	int (*probe)(struct efx_nic *efx);
	void (*remove)(struct efx_nic *efx);
	int (*init)(struct efx_nic *efx);
	int (*dimension_resources)(struct efx_nic *efx);
	void (*fini)(struct efx_nic *efx);
	void (*monitor)(struct efx_nic *efx);
	enum reset_type (*map_reset_reason)(enum reset_type reason);
	int (*map_reset_flags)(u32 *flags);
	int (*reset)(struct efx_nic *efx, enum reset_type method);
	int (*probe_port)(struct efx_nic *efx);
	void (*remove_port)(struct efx_nic *efx);
	bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
	int (*fini_dmaq)(struct efx_nic *efx);
	void (*prepare_flush)(struct efx_nic *efx);
	void (*finish_flush)(struct efx_nic *efx);
	void (*prepare_flr)(struct efx_nic *efx);
	void (*finish_flr)(struct efx_nic *efx);
	size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
	size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
			       struct rtnl_link_stats64 *core_stats);
	void (*start_stats)(struct efx_nic *efx);
	void (*pull_stats)(struct efx_nic *efx);
	void (*stop_stats)(struct efx_nic *efx);
	void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
	void (*push_irq_moderation)(struct efx_channel *channel);
	int (*reconfigure_port)(struct efx_nic *efx);
	void (*prepare_enable_fc_tx)(struct efx_nic *efx);
	int (*reconfigure_mac)(struct efx_nic *efx);
	bool (*check_mac_fault)(struct efx_nic *efx);
	void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
	int (*set_wol)(struct efx_nic *efx, u32 type);
	void (*resume_wol)(struct efx_nic *efx);
	int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
	int (*test_nvram)(struct efx_nic *efx);
	void (*mcdi_request)(struct efx_nic *efx,
			     const efx_dword_t *hdr, size_t hdr_len,
			     const efx_dword_t *sdu, size_t sdu_len);
	bool (*mcdi_poll_response)(struct efx_nic *efx);
	void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu,
				   size_t pdu_offset, size_t pdu_len);
	int (*mcdi_poll_reboot)(struct efx_nic *efx);
	void (*irq_enable_master)(struct efx_nic *efx);
	void (*irq_test_generate)(struct efx_nic *efx);
	void (*irq_disable_non_ev)(struct efx_nic *efx);
	irqreturn_t (*irq_handle_msi)(int irq, void *dev_id);
	irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id);
	int (*tx_probe)(struct efx_tx_queue *tx_queue);
	void (*tx_init)(struct efx_tx_queue *tx_queue);
	void (*tx_remove)(struct efx_tx_queue *tx_queue);
	void (*tx_write)(struct efx_tx_queue *tx_queue);
	int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
				  const u32 *rx_indir_table);
	int (*rx_probe)(struct efx_rx_queue *rx_queue);
	void (*rx_init)(struct efx_rx_queue *rx_queue);
	void (*rx_remove)(struct efx_rx_queue *rx_queue);
	void (*rx_write)(struct efx_rx_queue *rx_queue);
	void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
	int (*ev_probe)(struct efx_channel *channel);
	int (*ev_init)(struct efx_channel *channel);
	void (*ev_fini)(struct efx_channel *channel);
	void (*ev_remove)(struct efx_channel *channel);
	int (*ev_process)(struct efx_channel *channel, int quota);
	void (*ev_read_ack)(struct efx_channel *channel);
	void (*ev_test_generate)(struct efx_channel *channel);
	int (*filter_table_probe)(struct efx_nic *efx);
	void (*filter_table_restore)(struct efx_nic *efx);
	void (*filter_table_remove)(struct efx_nic *efx);
	void (*filter_update_rx_scatter)(struct efx_nic *efx);
	s32 (*filter_insert)(struct efx_nic *efx,
			     struct efx_filter_spec *spec, bool replace);
	int (*filter_remove_safe)(struct efx_nic *efx,
				  enum efx_filter_priority priority,
				  u32 filter_id);
	int (*filter_get_safe)(struct efx_nic *efx,
			       enum efx_filter_priority priority,
			       u32 filter_id, struct efx_filter_spec *);
	int (*filter_clear_rx)(struct efx_nic *efx,
			       enum efx_filter_priority priority);
	u32 (*filter_count_rx_used)(struct efx_nic *efx,
				    enum efx_filter_priority priority);
	u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
	s32 (*filter_get_rx_ids)(struct efx_nic *efx,
				 enum efx_filter_priority priority,
				 u32 *buf, u32 size);
#ifdef CONFIG_RFS_ACCEL
	s32 (*filter_rfs_insert)(struct efx_nic *efx,
				 struct efx_filter_spec *spec);
	bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id,
				      unsigned int index);
#endif
#ifdef CONFIG_SFC_MTD
	int (*mtd_probe)(struct efx_nic *efx);
	void (*mtd_rename)(struct efx_mtd_partition *part);
	int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len,
			size_t *retlen, u8 *buffer);
	int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len);
	int (*mtd_write)(struct mtd_info *mtd, loff_t start, size_t len,
			 size_t *retlen, const u8 *buffer);
	int (*mtd_sync)(struct mtd_info *mtd);
#endif
	void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
	int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
	int (*ptp_set_ts_config)(struct efx_nic *efx,
				 struct hwtstamp_config *init);
	int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
	int (*sriov_init)(struct efx_nic *efx);
	void (*sriov_fini)(struct efx_nic *efx);
	void (*sriov_mac_address_changed)(struct efx_nic *efx);
	bool (*sriov_wanted)(struct efx_nic *efx);
	void (*sriov_reset)(struct efx_nic *efx);
	void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i);
	int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, u8 *mac);
	int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan,
				 u8 qos);
	int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i,
				     bool spoofchk);
	int (*sriov_get_vf_config)(struct efx_nic *efx, int vf_i,
				   struct ifla_vf_info *ivi);
	int (*vswitching_probe)(struct efx_nic *efx);
	int (*vswitching_restore)(struct efx_nic *efx);
	void (*vswitching_remove)(struct efx_nic *efx);

	int revision;
	unsigned int txd_ptr_tbl_base;
	unsigned int rxd_ptr_tbl_base;
	unsigned int buf_tbl_base;
	unsigned int evq_ptr_tbl_base;
	unsigned int evq_rptr_tbl_base;
	u64 max_dma_mask;
	unsigned int rx_prefix_size;
	unsigned int rx_hash_offset;
	unsigned int rx_ts_offset;
	unsigned int rx_buffer_padding;
	bool can_rx_scatter;
	bool always_rx_scatter;
	unsigned int max_interrupt_mode;
	unsigned int timer_period_max;
	netdev_features_t offload_features;
	int mcdi_max_ver;
	unsigned int max_rx_ip_filters;
	u32 hwtstamp_filters;
};
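
/* Illustrative sketch, not the driver's MCDI implementation: the
 * polling sequence implied by the mcdi_* hooks documented above.  The
 * function name is hypothetical.
 */
static inline int efx_example_mcdi_wait(struct efx_nic *efx,
					const struct efx_nic_type *type)
{
	while (!type->mcdi_poll_response(efx)) {
		int rc = type->mcdi_poll_reboot(efx);

		if (rc)
			return rc;	/* MC rebooted; abort the request */
		cpu_relax();
	}
	return 0;			/* response PDU can now be read */
}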

/**************************************************************************
 *
 * Prototypes and inline functions
 *
 *************************************************************************/

static inline struct efx_channel *
efx_get_channel(struct efx_nic *efx, unsigned index)
{
	EFX_BUG_ON_PARANOID(index >= efx->n_channels);
	return efx->channel[index];
}

/* Iterate over all used channels */
#define efx_for_each_channel(_channel, _efx)				\
	for (_channel = (_efx)->channel[0];				\
	     _channel;							\
	     _channel = (_channel->channel + 1 < (_efx)->n_channels) ?	\
		     (_efx)->channel[_channel->channel + 1] : NULL)
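
/* Illustrative usage of the iterator above (hypothetical function, not
 * part of the driver):
 */
static inline unsigned int efx_example_count_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	unsigned int n = 0;

	efx_for_each_channel(channel, efx)
		n++;
	return n;			/* equals efx->n_channels */
}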

/* Iterate over all used channels in reverse */
#define efx_for_each_channel_rev(_channel, _efx)			\
	for (_channel = (_efx)->channel[(_efx)->n_channels - 1];	\
	     _channel;							\
	     _channel = _channel->channel ?				\
		     (_efx)->channel[_channel->channel - 1] : NULL)

static inline struct efx_tx_queue *
efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
{
	EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
			    type >= EFX_TXQ_TYPES);
	return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
}
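
/* Illustrative usage, not driver code: fetch channel 0's
 * checksum-offload TX queue.
 */
static inline struct efx_tx_queue *
efx_example_offload_txq(struct efx_nic *efx)
{
	return efx_get_tx_queue(efx, 0, EFX_TXQ_TYPE_OFFLOAD);
}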

static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
		channel->efx->n_tx_channels;
}

static inline struct efx_tx_queue *
efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
{
	EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
			    type >= EFX_TXQ_TYPES);
	return &channel->tx_queue[type];
}

static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
{
	return !(tx_queue->efx->net_dev->num_tc < 2 &&
		 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
}

/* Iterate over all TX queues belonging to a channel */
#define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
	if (!efx_channel_has_tx_queues(_channel))			\
		;							\
	else								\
		for (_tx_queue = (_channel)->tx_queue;			\
		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
			     efx_tx_queue_used(_tx_queue);		\
		     _tx_queue++)

/* Iterate over all possible TX queues belonging to a channel */
#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel)	\
	if (!efx_channel_has_tx_queues(_channel))			\
		;							\
	else								\
		for (_tx_queue = (_channel)->tx_queue;			\
		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;	\
		     _tx_queue++)

static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
{
	return channel->rx_queue.core_index >= 0;
}

static inline struct efx_rx_queue *
efx_channel_get_rx_queue(struct efx_channel *channel)
{
	EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
	return &channel->rx_queue;
}

/* Iterate over all RX queues belonging to a channel */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel)		\
	if (!efx_channel_has_rx_queue(_channel))			\
		;							\
	else								\
		for (_rx_queue = &(_channel)->rx_queue;			\
		     _rx_queue;						\
		     _rx_queue = NULL)

static inline struct efx_channel *
efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
{
	return container_of(rx_queue, struct efx_channel, rx_queue);
}

static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
{
	return efx_rx_queue_channel(rx_queue)->channel;
}

/* Returns a pointer to the specified receive buffer in the RX
 * descriptor queue.
 */
static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
						  unsigned int index)
{
	return &rx_queue->buffer[index];
}

/**
 * EFX_MAX_FRAME_LEN - calculate maximum frame length
 *
 * This calculates the maximum frame length that will be used for a
 * given MTU.  The frame length will be equal to the MTU plus a
 * constant amount of header space and padding.  This is the quantity
 * that the net driver will program into the MAC as the maximum frame
 * length.
 *
 * The 10G MAC requires 8-byte alignment on the frame
 * length, so we round up to the nearest 8.
 *
 * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
 * XGMII cycle).  If the frame length reaches the maximum value in the
 * same cycle, the XMAC can miss the IPG altogether.  We work around
 * this by adding a further 16 bytes.
 */
#define EFX_MAX_FRAME_LEN(mtu) \
	((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16)
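
/* Worked example: EFX_MAX_FRAME_LEN(1500) = ((1500 + 14 + 4 + 4 + 7)
 * & ~7) + 16 = 1528 + 16 = 1544 bytes (ETH_HLEN = 14, VLAN_HLEN = 4,
 * FCS = 4, rounded up to a multiple of 8, plus the 16-byte IPG
 * workaround described above).
 */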

static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
{
	return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
}
static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
{
	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}

#endif /* EFX_NET_DRIVER_H */