/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#ifndef _IXGBE_H_
#define _IXGBE_H_

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/cpumask.h>
#include <linux/aer.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>

#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
#define IXGBE_FCOE
#include "ixgbe_fcoe.h"
#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
#ifdef CONFIG_IXGBE_DCA
#include <linux/dca.h>
#endif

#include <net/busy_poll.h>

#ifdef CONFIG_NET_RX_BUSY_POLL
#define BP_EXTENDED_STATS
#endif
/* common prefix used by pr_<> macros */
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* TX/RX descriptor defines */
#define IXGBE_DEFAULT_TXD		    512
#define IXGBE_DEFAULT_TX_WORK		    256
#define IXGBE_MAX_TXD			   4096
#define IXGBE_MIN_TXD			     64

#if (PAGE_SIZE < 8192)
#define IXGBE_DEFAULT_RXD		    512
#else
#define IXGBE_DEFAULT_RXD		    128
#endif
#define IXGBE_MAX_RXD			   4096
#define IXGBE_MIN_RXD			     64

#define IXGBE_ETH_P_LLDP		 0x88CC

/* flow control */
#define IXGBE_MIN_FCRTL			   0x40
#define IXGBE_MAX_FCRTL			0x7FF80
#define IXGBE_MIN_FCRTH			  0x600
#define IXGBE_MAX_FCRTH			0x7FFF0
#define IXGBE_DEFAULT_FCPAUSE		 0xFFFF
#define IXGBE_MIN_FCPAUSE		      0
#define IXGBE_MAX_FCPAUSE		 0xFFFF

/* Supported Rx Buffer Sizes */
#define IXGBE_RXBUFFER_256    256  /* Used for skb receive header */
#define IXGBE_RXBUFFER_2K    2048
#define IXGBE_RXBUFFER_3K    3072
#define IXGBE_RXBUFFER_4K    4096
#define IXGBE_MAX_RXBUFFER  16384  /* largest size for a single descriptor */

/*
 * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 64 more, and skb_shared_info adds an additional 320 bytes;
 * this adds up to 448 bytes of extra data.
 *
 * Since netdev_alloc_skb now allocates a page fragment we can use a value
 * of 256 and the resultant skb will have a truesize of 960 or less.
 */
#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
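/* IXGBE_RX_HDR_SIZE bounds how many bytes of a received frame get pulled
 * into the skb linear area; the remainder of the frame stays in the
 * attached page fragment.
 */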

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */

enum ixgbe_tx_flags {
	/* cmd_type flags */
	IXGBE_TX_FLAGS_HW_VLAN	= 0x01,
	IXGBE_TX_FLAGS_TSO	= 0x02,
	IXGBE_TX_FLAGS_TSTAMP	= 0x04,

	/* olinfo flags */
	IXGBE_TX_FLAGS_CC	= 0x08,
	IXGBE_TX_FLAGS_IPV4	= 0x10,
	IXGBE_TX_FLAGS_CSUM	= 0x20,

	/* software defined flags */
	IXGBE_TX_FLAGS_SW_VLAN	= 0x40,
	IXGBE_TX_FLAGS_FCOE	= 0x80,
};

/* VLAN info */
#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT  29
#define IXGBE_TX_FLAGS_VLAN_SHIFT	16
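/* Illustrative use (mirroring the Tx path): the 16-bit 802.1Q tag lives
 * in the upper half of tx_flags, e.g.
 *	tx_flags |= vlan_tci << IXGBE_TX_FLAGS_VLAN_SHIFT;
 *	prio = (tx_flags & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >>
 *	       IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
 */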

#define IXGBE_MAX_VF_MC_ENTRIES         30
#define IXGBE_MAX_VF_FUNCTIONS          64
#define IXGBE_MAX_VFTA_ENTRIES          128
#define MAX_EMULATION_MAC_ADDRS         16
#define IXGBE_MAX_PF_MACVLANS           15
#define VMDQ_P(p)   ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
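/* Note: VMDQ_P() references a local "adapter" variable, so it is only
 * usable where one is in scope; it converts a pool-relative index into
 * an absolute pool number by applying the VMDq feature offset.
 */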
#define IXGBE_82599_VF_DEVICE_ID        0x10ED
#define IXGBE_X540_VF_DEVICE_ID         0x1515

struct vf_data_storage {
	unsigned char vf_mac_addresses[ETH_ALEN];
	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
	u16 num_vf_mc_hashes;
	u16 default_vf_vlan_id;
	u16 vlans_enabled;
	bool clear_to_send;
	bool pf_set_mac;
	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
	u16 pf_qos;
	u16 tx_rate;
	u16 vlan_count;
	u8 spoofchk_enabled;
	bool rss_query_enabled;
	u8 trusted;
	int xcast_mode;
	unsigned int vf_api;
};

enum ixgbevf_xcast_modes {
	IXGBEVF_XCAST_MODE_NONE = 0,
	IXGBEVF_XCAST_MODE_MULTI,
	IXGBEVF_XCAST_MODE_ALLMULTI,
};

struct vf_macvlans {
	struct list_head l;
	int vf;
	bool free;
	bool is_macvlan;
	u8 vf_macvlan[ETH_ALEN];
};

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
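/* Example: TXD_USE_COUNT(60000) = DIV_ROUND_UP(60000, 16384) = 4, i.e. a
 * 60000-byte chunk of data consumes four data descriptors.  DESC_NEEDED
 * is the worst-case stop threshold: one descriptor per possible fragment
 * plus slack for the head and a context descriptor.
 */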

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
	union ixgbe_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	struct sk_buff *skb;
	unsigned int bytecount;
	unsigned short gso_segs;
	__be16 protocol;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct ixgbe_rx_buffer {
	struct sk_buff *skb;
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
};

struct ixgbe_queue_stats {
	u64 packets;
	u64 bytes;
#ifdef BP_EXTENDED_STATS
	u64 yields;
	u64 misses;
	u64 cleaned;
#endif  /* BP_EXTENDED_STATS */
};

struct ixgbe_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
};

struct ixgbe_rx_queue_stats {
	u64 rsc_count;
	u64 rsc_flush;
	u64 non_eop_descs;
	u64 alloc_rx_page_failed;
	u64 alloc_rx_buff_failed;
	u64 csum_err;
};

#define IXGBE_TS_HDR_LEN 8

enum ixgbe_ring_state_t {
	__IXGBE_TX_FDIR_INIT_DONE,
	__IXGBE_TX_XPS_INIT_DONE,
	__IXGBE_TX_DETECT_HANG,
	__IXGBE_HANG_CHECK_ARMED,
	__IXGBE_RX_RSC_ENABLED,
	__IXGBE_RX_CSUM_UDP_ZERO_ERR,
	__IXGBE_RX_FCOE,
};

struct ixgbe_fwd_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct net_device *netdev;
	struct ixgbe_adapter *real_adapter;
	unsigned int tx_base_queue;
	unsigned int rx_base_queue;
	int pool;
};

#define check_for_tx_hang(ring) \
	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
	set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
	clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define ring_is_rsc_enabled(ring) \
	test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define set_ring_rsc_enabled(ring) \
	set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define clear_ring_rsc_enabled(ring) \
	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
struct ixgbe_ring {
	struct ixgbe_ring *next;	/* pointer to next ring in q_vector */
	struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
	struct net_device *netdev;	/* netdev ring belongs to */
	struct device *dev;		/* device for DMA mapping */
	struct ixgbe_fwd_adapter *l2_accel_priv;
	void *desc;			/* descriptor ring memory */
	union {
		struct ixgbe_tx_buffer *tx_buffer_info;
		struct ixgbe_rx_buffer *rx_buffer_info;
	};
	unsigned long state;
	u8 __iomem *tail;
	dma_addr_t dma;			/* phys. address of descriptor ring */
	unsigned int size;		/* length in bytes */

	u16 count;			/* amount of descriptors */

	u8 queue_index; /* needed for multiqueue queue management */
	u8 reg_idx;			/* holds the special value that gets
					 * the hardware register offset
					 * associated with this ring, which is
					 * different for DCB and RSS modes
					 */
	u16 next_to_use;
	u16 next_to_clean;

	unsigned long last_rx_timestamp;

	union {
		u16 next_to_alloc;
		struct {
			u8 atr_sample_rate;
			u8 atr_count;
		};
	};

	u8 dcb_tc;
	struct ixgbe_queue_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ixgbe_tx_queue_stats tx_stats;
		struct ixgbe_rx_queue_stats rx_stats;
	};
} ____cacheline_internodealigned_in_smp;

enum ixgbe_ring_f_enum {
	RING_F_NONE = 0,
	RING_F_VMDQ,  /* SR-IOV uses the same ring feature */
	RING_F_RSS,
	RING_F_FDIR,
#ifdef IXGBE_FCOE
	RING_F_FCOE,
#endif /* IXGBE_FCOE */

	RING_F_ARRAY_SIZE      /* must be last in enum set */
};

#define IXGBE_MAX_RSS_INDICES		16
#define IXGBE_MAX_RSS_INDICES_X550	64
#define IXGBE_MAX_VMDQ_INDICES		64
#define IXGBE_MAX_FDIR_INDICES		63	/* based on q_vector limit */
#define IXGBE_MAX_FCOE_INDICES		8
#define MAX_RX_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_TX_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES		4
#define IXGBE_BAD_L2A_QUEUE		3
#define IXGBE_MAX_MACVLANS		31
#define IXGBE_MAX_DCBMACVLANS		8

struct ixgbe_ring_feature {
	u16 limit;	/* upper limit on feature indices */
	u16 indices;	/* current value of indices */
	u16 mask;	/* Mask used for feature to ring mapping */
	u16 offset;	/* offset to start of feature */
} ____cacheline_internodealigned_in_smp;

#define IXGBE_82599_VMDQ_8Q_MASK 0x78
#define IXGBE_82599_VMDQ_4Q_MASK 0x7C
#define IXGBE_82599_VMDQ_2Q_MASK 0x7E

/*
 * FCoE requires that all Rx buffers be over 2200 bytes in length.  Since
 * that does not fit in a standard half-page (2K) buffer, we double the
 * page order for FCoE enabled Rx queues so each half-page buffer is 4K.
 */
static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
{
#ifdef IXGBE_FCOE
	if (test_bit(__IXGBE_RX_FCOE, &ring->state))
		return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K :
					    IXGBE_RXBUFFER_3K;
#endif
	return IXGBE_RXBUFFER_2K;
}

static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
{
#ifdef IXGBE_FCOE
	if (test_bit(__IXGBE_RX_FCOE, &ring->state))
		return (PAGE_SIZE < 8192) ? 1 : 0;
#endif
	return 0;
}
#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
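/* With 4K pages this works out as follows: a regular ring uses order-0
 * pages split into two 2K buffers, while an FCoE ring uses order-1 (8K)
 * pages split into two 4K buffers to satisfy the 2200-byte minimum.
 */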

struct ixgbe_ring_container {
	struct ixgbe_ring *ring;	/* pointer to linked list of rings */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};

/* iterator for handling rings in ring container */
#define ixgbe_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
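/* Typical use (clean_ring() is a stand-in for whatever per-ring work the
 * caller does):
 *	ixgbe_for_each_ring(ring, q_vector->tx)
 *		clean_ring(ring);
 */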

#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
			      ? 8 : 1)
#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS

/* MAX_Q_VECTORS of these are allocated,
 * but we only use one per queue-specific vector.
 */
struct ixgbe_q_vector {
	struct ixgbe_adapter *adapter;
#ifdef CONFIG_IXGBE_DCA
	int cpu;	    /* CPU for DCA */
#endif
	u16 v_idx;		/* index of q_vector within array, also used for
				 * finding the bit in EICR and friends that
				 * represents the vector for this ring */
	u16 itr;		/* Interrupt throttle rate written to EITR */
	struct ixgbe_ring_container rx, tx;

	struct napi_struct napi;
	cpumask_t affinity_mask;
	int numa_node;
	struct rcu_head rcu;	/* to avoid race with update stats on free */
	char name[IFNAMSIZ + 9];

#ifdef CONFIG_NET_RX_BUSY_POLL
	atomic_t state;
#endif  /* CONFIG_NET_RX_BUSY_POLL */

	/* for dynamic allocation of rings associated with this q_vector */
	struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
};

#ifdef CONFIG_NET_RX_BUSY_POLL
enum ixgbe_qv_state_t {
	IXGBE_QV_STATE_IDLE = 0,
	IXGBE_QV_STATE_NAPI,
	IXGBE_QV_STATE_POLL,
	IXGBE_QV_STATE_DISABLE
};
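/* Ownership protocol: NAPI or a busy-polling socket claims a q_vector
 * with a single atomic_cmpxchg() from IDLE; a claim that finds the vector
 * busy simply fails (and is counted as a yield), so nothing ever spins.
 */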

static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{
	/* reset state to idle */
	atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
}

/* called from the device poll routine to get ownership of a q_vector */
static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
{
	int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
				IXGBE_QV_STATE_NAPI);
#ifdef BP_EXTENDED_STATS
	if (rc != IXGBE_QV_STATE_IDLE)
		q_vector->tx.ring->stats.yields++;
#endif

	return rc == IXGBE_QV_STATE_IDLE;
}

/* called to release NAPI ownership of a q_vector, flushing pending Rx frames */
static inline void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
{
	WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_NAPI);

	/* flush any outstanding Rx frames */
	if (q_vector->napi.gro_list)
		napi_gro_flush(&q_vector->napi, false);

	/* reset state to idle */
	atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
}

/* called from ixgbe_low_latency_poll() */
static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
{
	int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
				IXGBE_QV_STATE_POLL);
#ifdef BP_EXTENDED_STATS
	if (rc != IXGBE_QV_STATE_IDLE)
		q_vector->tx.ring->stats.yields++;
#endif
	return rc == IXGBE_QV_STATE_IDLE;
}

/* called to release busy-poll ownership of a q_vector */
static inline void ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
{
	WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_POLL);

	/* reset state to idle */
	atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
}

/* true if a socket is polling, even if it did not get the lock */
static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
{
	return atomic_read(&q_vector->state) == IXGBE_QV_STATE_POLL;
}

/* false if QV is currently owned */
static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
{
	int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
				IXGBE_QV_STATE_DISABLE);

	return rc == IXGBE_QV_STATE_IDLE;
}

#else /* CONFIG_NET_RX_BUSY_POLL */
static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{
}

static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
{
	return true;
}

static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
{
	return false;
}

static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
{
	return false;
}

static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
{
	return false;
}

static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
{
	return false;
}

static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
{
	return true;
}

#endif /* CONFIG_NET_RX_BUSY_POLL */

#ifdef CONFIG_IXGBE_HWMON

#define IXGBE_HWMON_TYPE_LOC		0
#define IXGBE_HWMON_TYPE_TEMP		1
#define IXGBE_HWMON_TYPE_CAUTION	2
#define IXGBE_HWMON_TYPE_MAX		3
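/* One sysfs attribute of each type above is created per sensor, which is
 * why the arrays below are sized IXGBE_MAX_SENSORS * 4.
 */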

struct hwmon_attr {
	struct device_attribute dev_attr;
	struct ixgbe_hw *hw;
	struct ixgbe_thermal_diode_data *sensor;
	char name[12];
};

struct hwmon_buff {
	struct attribute_group group;
	const struct attribute_group *groups[2];
	struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1];
	struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4];
	unsigned int n_hwmon;
};
#endif /* CONFIG_IXGBE_HWMON */

/*
 * microsecond values for various ITR rates, shifted left by 2 to fit the
 * ITR register, whose first 3 bits are reserved and must be 0
 */
#define IXGBE_MIN_RSC_ITR	24
#define IXGBE_100K_ITR		40
#define IXGBE_20K_ITR		200
#define IXGBE_12K_ITR		336
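/* Example of the encoding: 20000 interrupts/sec -> 50 usec between
 * interrupts -> 50 << 2 = 200 = IXGBE_20K_ITR.  Likewise 100K -> 10 usec
 * -> 40, and 12K -> ~84 usec -> 336.
 */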

/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
					const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
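/* e.g. ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP) is non-zero once
 * the descriptor completes a frame (IXGBE_RXD_STAT_EOP comes from
 * ixgbe_type.h).
 */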

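/* ixgbe_desc_unused - descriptors the driver may still fill.  One slot is
 * deliberately left empty so that next_to_use == next_to_clean always
 * means "empty" rather than "full".  Example: count = 512,
 * next_to_clean = 10, next_to_use = 500 gives 512 + 10 - 500 - 1 = 21
 * slots still available.
 */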
static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}

#define IXGBE_RX_DESC(R, i)	    \
	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC(R, i)	    \
	(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
#define IXGBE_TX_CTXTDESC(R, i)	    \
	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
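/* Usage mirrors the hot paths, e.g.:
 *	union ixgbe_adv_rx_desc *rx_desc =
 *		IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
 */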

#define IXGBE_MAX_JUMBO_FRAME_SIZE	9728 /* Maximum Supported Size 9.5KB */
#ifdef IXGBE_FCOE
/* Use 3K as the baby jumbo frame size for FCoE */
#define IXGBE_FCOE_JUMBO_FRAME_SIZE       3072
#endif /* IXGBE_FCOE */

#define OTHER_VECTOR 1
#define NON_Q_VECTORS (OTHER_VECTOR)

#define MAX_MSIX_VECTORS_82599 64
#define MAX_Q_VECTORS_82599 64
#define MAX_MSIX_VECTORS_82598 18
#define MAX_Q_VECTORS_82598 16

struct ixgbe_mac_addr {
	u8 addr[ETH_ALEN];
	u16 pool;
	u16 state; /* bitmask */
};

#define IXGBE_MAC_STATE_DEFAULT		0x1
#define IXGBE_MAC_STATE_MODIFIED	0x2
#define IXGBE_MAC_STATE_IN_USE		0x4

#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599

#define MIN_MSIX_Q_VECTORS 1
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)

/* default to trying for four seconds */
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
#define IXGBE_SFP_POLL_JIFFIES (2 * HZ)	/* SFP poll every 2 seconds */

/* board specific private data structure */
struct ixgbe_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	/* OS defined structs */
	struct net_device *netdev;
	struct pci_dev *pdev;

	unsigned long state;

	/* Some features need tri-state capability,
	 * thus the additional *_CAPABLE flags.
	 */
	u32 flags;
#define IXGBE_FLAG_MSI_ENABLED                  (u32)(1 << 1)
#define IXGBE_FLAG_MSIX_ENABLED                 (u32)(1 << 3)
#define IXGBE_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 4)
#define IXGBE_FLAG_RX_PS_CAPABLE                (u32)(1 << 5)
#define IXGBE_FLAG_RX_PS_ENABLED                (u32)(1 << 6)
#define IXGBE_FLAG_DCA_ENABLED                  (u32)(1 << 8)
#define IXGBE_FLAG_DCA_CAPABLE                  (u32)(1 << 9)
#define IXGBE_FLAG_IMIR_ENABLED                 (u32)(1 << 10)
#define IXGBE_FLAG_MQ_CAPABLE                   (u32)(1 << 11)
#define IXGBE_FLAG_DCB_ENABLED                  (u32)(1 << 12)
#define IXGBE_FLAG_VMDQ_CAPABLE                 (u32)(1 << 13)
#define IXGBE_FLAG_VMDQ_ENABLED                 (u32)(1 << 14)
#define IXGBE_FLAG_FAN_FAIL_CAPABLE             (u32)(1 << 15)
#define IXGBE_FLAG_NEED_LINK_UPDATE             (u32)(1 << 16)
#define IXGBE_FLAG_NEED_LINK_CONFIG             (u32)(1 << 17)
#define IXGBE_FLAG_FDIR_HASH_CAPABLE            (u32)(1 << 18)
#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE         (u32)(1 << 19)
#define IXGBE_FLAG_FCOE_CAPABLE                 (u32)(1 << 20)
#define IXGBE_FLAG_FCOE_ENABLED                 (u32)(1 << 21)
#define IXGBE_FLAG_SRIOV_CAPABLE                (u32)(1 << 22)
#define IXGBE_FLAG_SRIOV_ENABLED                (u32)(1 << 23)
#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE	BIT(24)
#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED		BIT(25)
#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER	BIT(26)

	u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE                 (u32)(1 << 0)
#define IXGBE_FLAG2_RSC_ENABLED                 (u32)(1 << 1)
#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE         (u32)(1 << 2)
#define IXGBE_FLAG2_TEMP_SENSOR_EVENT           (u32)(1 << 3)
#define IXGBE_FLAG2_SEARCH_FOR_SFP              (u32)(1 << 4)
#define IXGBE_FLAG2_SFP_NEEDS_RESET             (u32)(1 << 5)
#define IXGBE_FLAG2_RESET_REQUESTED             (u32)(1 << 6)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT        (u32)(1 << 7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		(u32)(1 << 8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		(u32)(1 << 9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED		(u32)(1 << 10)
#define IXGBE_FLAG2_PHY_INTERRUPT		(u32)(1 << 11)
#ifdef CONFIG_IXGBE_VXLAN
#define IXGBE_FLAG2_VXLAN_REREG_NEEDED		BIT(12)
#endif

	/* Tx fast path data */
	int num_tx_queues;
	u16 tx_itr_setting;
	u16 tx_work_limit;

	/* Rx fast path data */
	int num_rx_queues;
	u16 rx_itr_setting;

	/* TX */
	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;

	u64 restart_queue;
	u64 lsc_int;
	u32 tx_timeout_count;

	/* RX */
	struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
	int num_rx_pools;		/* == num_rx_queues in 82598 */
	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
	u64 hw_csum_rx_error;
	u64 hw_rx_no_dma_resources;
	u64 rsc_total_count;
	u64 rsc_total_flush;
	u64 non_eop_descs;
	u32 alloc_rx_page_failed;
	u32 alloc_rx_buff_failed;

	struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];

	/* DCB parameters */
	struct ieee_pfc *ixgbe_ieee_pfc;
	struct ieee_ets *ixgbe_ieee_ets;
	struct ixgbe_dcb_config dcb_cfg;
	struct ixgbe_dcb_config temp_dcb_cfg;
	u8 dcb_set_bitmap;
	u8 dcbx_cap;
	enum ixgbe_fc_mode last_lfc_mode;

	int num_q_vectors;	/* current number of q_vectors for device */
	int max_q_vectors;	/* true count of q_vectors for device */
	struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
	struct msix_entry *msix_entries;

	u32 test_icr;
	struct ixgbe_ring test_tx_ring;
	struct ixgbe_ring test_rx_ring;

	/* structs defined in ixgbe_hw.h */
	struct ixgbe_hw hw;
	u16 msg_enable;
	struct ixgbe_hw_stats stats;

	u64 tx_busy;
	unsigned int tx_ring_count;
	unsigned int rx_ring_count;

	u32 link_speed;
	bool link_up;
	unsigned long sfp_poll_time;
	unsigned long link_check_timeout;

	struct timer_list service_timer;
	struct work_struct service_task;

	struct hlist_head fdir_filter_list;
	unsigned long fdir_overflow; /* number of times ATR was backed off */
	union ixgbe_atr_input fdir_mask;
	int fdir_filter_count;
	u32 fdir_pballoc;
	u32 atr_sample_rate;
	spinlock_t fdir_perfect_lock;

#ifdef IXGBE_FCOE
	struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
	u8 __iomem *io_addr; /* Mainly for iounmap use */
	u32 wol;

	u16 bridge_mode;

	u16 eeprom_verh;
	u16 eeprom_verl;
	u16 eeprom_cap;

	u32 interrupt_event;
	u32 led_reg;

	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_caps;
	struct work_struct ptp_tx_work;
	struct sk_buff *ptp_tx_skb;
	struct hwtstamp_config tstamp_config;
	unsigned long ptp_tx_start;
	unsigned long last_overflow_check;
	unsigned long last_rx_ptp_check;
	unsigned long last_rx_timestamp;
	spinlock_t tmreg_lock;
	struct cyclecounter hw_cc;
	struct timecounter hw_tc;
	u32 base_incval;
	u32 tx_hwtstamp_timeouts;
	u32 rx_hwtstamp_cleared;
	void (*ptp_setup_sdp)(struct ixgbe_adapter *);

	/* SR-IOV */
	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
	unsigned int num_vfs;
	struct vf_data_storage *vfinfo;
	int vf_rate_link_speed;
	struct vf_macvlans vf_mvs;
	struct vf_macvlans *mv_list;

	u32 timer_event_accumulator;
	u32 vferr_refcount;
	struct ixgbe_mac_addr *mac_table;
#ifdef CONFIG_IXGBE_VXLAN
	u16 vxlan_port;
#endif
	struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
	struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
#ifdef CONFIG_DEBUG_FS
	struct dentry *ixgbe_dbg_adapter;
#endif /* CONFIG_DEBUG_FS */

	u8 default_up;
	unsigned long fwd_bitmask; /* Bitmask indicating in use pools */

/* maximum number of RETA entries among all devices supported by ixgbe
 * driver: currently it's x550 device in non-SRIOV mode
 */
#define IXGBE_MAX_RETA_ENTRIES 512
	u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES];

#define IXGBE_RSS_KEY_SIZE     40  /* size of RSS Hash Key in bytes */
	u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)];
};

static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
{
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		return IXGBE_MAX_RSS_INDICES;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		return IXGBE_MAX_RSS_INDICES_X550;
	default:
		return 0;
	}
}

struct ixgbe_fdir_filter {
	struct hlist_node fdir_node;
	union ixgbe_atr_input filter;
	u16 sw_idx;
	u16 action;
};

enum ixgbe_state_t {
	__IXGBE_TESTING,
	__IXGBE_RESETTING,
	__IXGBE_DOWN,
	__IXGBE_DISABLED,
	__IXGBE_REMOVING,
	__IXGBE_SERVICE_SCHED,
	__IXGBE_SERVICE_INITED,
	__IXGBE_IN_SFP_INIT,
	__IXGBE_PTP_RUNNING,
	__IXGBE_PTP_TX_IN_PROGRESS,
};

struct ixgbe_cb {
	union {				/* Union defining head/tail partner */
		struct sk_buff *head;
		struct sk_buff *tail;
	};
	dma_addr_t dma;
	u16 append_cnt;
	bool page_released;
};
#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)

enum ixgbe_boards {
	board_82598,
	board_82599,
	board_X540,
	board_X550,
	board_X550EM_x,
};

extern struct ixgbe_info ixgbe_82598_info;
extern struct ixgbe_info ixgbe_82599_info;
extern struct ixgbe_info ixgbe_X540_info;
extern struct ixgbe_info ixgbe_X550_info;
extern struct ixgbe_info ixgbe_X550EM_x_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops dcbnl_ops;
#endif

extern char ixgbe_driver_name[];
extern const char ixgbe_driver_version[];
#ifdef IXGBE_FCOE
extern char ixgbe_default_device_descr[];
#endif /* IXGBE_FCOE */

void ixgbe_up(struct ixgbe_adapter *adapter);
void ixgbe_down(struct ixgbe_adapter *adapter);
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
void ixgbe_reset(struct ixgbe_adapter *adapter);
void ixgbe_set_ethtool_ops(struct net_device *netdev);
int ixgbe_setup_rx_resources(struct ixgbe_ring *);
int ixgbe_setup_tx_resources(struct ixgbe_ring *);
void ixgbe_free_rx_resources(struct ixgbe_ring *);
void ixgbe_free_tx_resources(struct ixgbe_ring *);
void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
			       u16 subdevice_id);
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
#endif
int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
			 const u8 *addr, u16 queue);
int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
			 const u8 *addr, u16 queue);
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
				  struct ixgbe_ring *);
void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
				      struct ixgbe_tx_buffer *);
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
void ixgbe_write_eitr(struct ixgbe_q_vector *);
int ixgbe_poll(struct napi_struct *napi, int budget);
int ethtool_ioctl(struct ifreq *ifr);
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_hash_dword input,
					  union ixgbe_atr_hash_dword common,
					  u8 queue);
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask);
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue);
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *mask);
void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
#endif
int ixgbe_setup_tc(struct net_device *dev, u8 tc);
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
#endif /* CONFIG_IXGBE_HWMON */
#ifdef IXGBE_FCOE
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
	      u8 *hdr_len);
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
int ixgbe_fcoe_enable(struct net_device *netdev);
int ixgbe_fcoe_disable(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
#endif /* CONFIG_IXGBE_DCB */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
			   struct netdev_fcoe_hbainfo *info);
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
#endif /* IXGBE_FCOE */
#ifdef CONFIG_DEBUG_FS
void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
void ixgbe_dbg_init(void);
void ixgbe_dbg_exit(void);
#else
static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_init(void) {}
static inline void ixgbe_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS */
static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}

void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *);
void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb);
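/* ixgbe_ptp_rx_hwtstamp - choose the Rx timestamp method: TSIP means the
 * timestamp was placed in the packet buffer itself, otherwise fall back
 * to the timestamp latched in registers (RXDADV_STAT_TS).
 */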
static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
					 union ixgbe_adv_rx_desc *rx_desc,
					 struct sk_buff *skb)
{
	if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_TSIP))) {
		ixgbe_ptp_rx_pktstamp(rx_ring->q_vector, skb);
		return;
	}

	if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
		return;

	ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb);

	/* Update the last_rx_timestamp timer in order to enable watchdog check
	 * for error case of latched timestamp on a dropped packet.
	 */
	rx_ring->last_rx_timestamp = jiffies;
}

int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
#endif

netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
				  struct ixgbe_adapter *adapter,
				  struct ixgbe_ring *tx_ring);
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
void ixgbe_store_reta(struct ixgbe_adapter *adapter);
#endif /* _IXGBE_H_ */