netdevice.h
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *              Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>

struct netpoll_info;
struct device;
struct phy_device;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
struct mpls_dev;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously, in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

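/* A minimal sketch (not part of this header) of how a driver's .ndo_start_xmit
 * typically uses these codes: return NETDEV_TX_OK once the skb has been handed
 * to (or dropped by) the hardware, and stop the queue before the ring fills so
 * that NETDEV_TX_BUSY stays a rare corner case. All example_* names below are
 * hypothetical.
 */
#if 0	/* example only */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	if (!example_ring_has_room(priv)) {
		/* should have been stopped earlier; signal the rare case */
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	example_post_to_ring(priv, skb);	/* hardware now owns the skb */
	if (!example_ring_has_room(priv))
		netif_stop_queue(dev);		/* stop before the next skb would not fit */

	return NETDEV_TX_OK;
}
#endif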
/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}

/*
 *	Compute the worst case header length according to the protocols
 *	used.
 */

#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 *	Old network device statistics. Fields are native words
 *	(unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};


#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key rps_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
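/* A minimal sketch (not part of this header) of walking the multicast list
 * from a driver's .ndo_set_rx_mode; example_add_mc_filter() is hypothetical,
 * the iteration macros above are the real interface.
 */
#if 0	/* example only */
static void example_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if (netdev_mc_empty(dev))
		return;

	netdev_for_each_mc_addr(ha, dev)
		example_add_mc_filter(dev, ha->addr);	/* program one hardware filter */
}
#endif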

struct hh_cache {
	u16		hh_len;
	u16		__pad;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs.        */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
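/* A minimal sketch (not part of this header) of how callers typically use
 * LL_RESERVED_SPACE(): reserve link-layer headroom up front so later header
 * pushes do not need a reallocation. The helper name is hypothetical.
 */
#if 0	/* example only */
static struct sk_buff *example_alloc_tx_skb(struct net_device *dev,
					    unsigned int payload_len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* room for the hard header */
	skb->dev = dev;
	return skb;
}
#endif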

struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};


/*
 * This structure holds the boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	unsigned int		gro_count;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
	struct hrtimer		timer;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_HASHED,	/* In NAPI hash (busy polling possible) */
	NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler considers the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
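/* A minimal sketch (not part of this header) of an rx_handler that lets every
 * frame continue down the normal path; a real handler (bridge, bonding,
 * macvlan, ...) would inspect *pskb and possibly re-steer it.
 * netdev_rx_handler_register()/unregister() are the real entry points; the
 * handler name is hypothetical.
 */
#if 0	/* example only */
static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	/* inspect skb here; to divert it, set skb->dev to the new device
	 * and return RX_HANDLER_ANOTHER instead.
	 */
	(void)skb;
	return RX_HANDLER_PASS;
}

/* registration, with rtnl held:
 *	err = netdev_rx_handler_register(dev, example_rx_handler, NULL);
 */
#endif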

void __napi_schedule(struct napi_struct *n);
void __napi_schedule_irqoff(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable
 * to ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline bool napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}
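/* A minimal sketch (not part of this header) of the usual interrupt handler
 * pattern: mask device interrupts, then let napi_schedule() queue the poll.
 * Assumes <linux/interrupt.h>; the example_* device accessors are hypothetical.
 */
#if 0	/* example only */
static irqreturn_t example_isr(int irq, void *data)
{
	struct example_priv *priv = data;

	example_disable_rx_irq(priv);		/* quiet the device ... */
	napi_schedule(&priv->napi);		/* ... and defer the work to NAPI */

	return IRQ_HANDLED;
}
#endif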

/**
 *	napi_schedule_irqoff - schedule NAPI poll
 *	@n: napi context
 *
 * Variant of napi_schedule(), assuming hard irqs are masked.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

void __napi_complete(struct napi_struct *n);
void napi_complete_done(struct napi_struct *n, int work_done);
/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 * Consider using napi_complete_done() instead.
 */
static inline void napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}
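/* A minimal sketch (not part of this header) of a NAPI poll callback: process
 * up to @budget packets, and only complete (and re-enable interrupts) when the
 * ring ran dry. The napi_struct would have been set up with netif_napi_add();
 * the example_* helpers and struct example_priv are hypothetical.
 */
#if 0	/* example only */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work_done = 0;

	while (work_done < budget && example_rx_one(priv))
		work_done++;

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		example_enable_rx_irq(priv);	/* safe: we are no longer scheduled */
	}

	return work_done;
}
#endif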

/**
 *	napi_hash_add - add a NAPI to global hashtable
 *	@napi: napi context
 *
 * generate a new napi_id and store a @napi under it in napi_hash
 */
void napi_hash_add(struct napi_struct *napi);

/**
 *	napi_hash_del - remove a NAPI from global table
 *	@napi: napi context
 *
 * Warning: caller must observe rcu grace period
 * before freeing memory containing @napi, if
 * this function returns true.
 */
bool napi_hash_del(struct napi_struct *napi);

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
void napi_disable(struct napi_struct *n);

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
	clear_bit(NAPI_STATE_NPSVC, &n->state);
}

#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
					QUEUE_STATE_FROZEN)

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
 * netif_tx_* functions below are used to manipulate this flag.  The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently.  The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state).  Drivers should not need to call
 * netif_xmit*stopped functions, they should only be using netif_tx_*.
 */

struct netdev_queue {
/*
 * read mostly part
 */
	struct net_device	*dev;
	struct Qdisc __rcu	*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
/*
 * write mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;

	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
	unsigned long		tx_maxrate;
} ____cacheline_aligned_in_smp;

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 * Each entry is a 32bit value. Upper part is the high order bits
 * of flow hash, lower part is cpu number.
 * rps_cpu_mask is used to partition the space, depending on number of
 * possible cpus : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
 * For example, if 64 cpus are possible, rps_cpu_mask = 0x3f,
 * meaning we use 32-6=26 bits for the hash.
 */
struct rps_sock_flow_table {
	u32	mask;

	u32	ents[0] ____cacheline_aligned_in_smp;
};
#define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))

#define RPS_NO_CPU 0xffff

extern u32 rps_cpu_mask;
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int index = hash & table->mask;
		u32 val = hash & ~rps_cpu_mask;

		/* We only give a hint, preemption can change cpu under us */
		val |= raw_smp_processor_id();

		if (table->ents[index] != val)
			table->ents[index] = val;
	}
}

#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif
#endif /* CONFIG_RPS */

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
#ifdef CONFIG_RPS
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
#endif
	struct kobject			kobj;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;

/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, const char *buf, size_t len);
};

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
       - sizeof(struct xps_map)) / sizeof(u16))

/*
 * This structure holds all XPS maps for device.  Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\
    (nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_ITEM_ID_LEN 32

/* This structure holds a unique identifier to identify some
 * physical item (port for example) used by a netdevice.
 */
struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};

static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
					    struct netdev_phys_item_id *b)
{
	return a->id_len == b->id_len &&
	       memcmp(a->id, b->id, a->id_len) == 0;
}

typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb);

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *     This function is called once when network device is registered.
 *     The network device can use this for any late stage initialization
 *     or semantic validation. It can fail with an error code which will
 *     be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *     This function is called when device is unregistered or when registration
 *     fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *     This function is called when network device transitions to the up
 *     state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *     This function is called when network device transitions to the down
 *     state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Returns NETDEV_TX_OK.  Can return NETDEV_TX_BUSY, but you should stop
 *	the queue before that can happen; it's for obsolete devices and weird
 *	corner cases, but the stack really does a non-trivial amount
 *	of useless work if you return NETDEV_TX_BUSY.
 *        (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 *	Required; cannot be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
 *                         void *accel_priv, select_queue_fallback_t fallback);
 *	Called to decide which queue to use when device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow device receiver to make
 *	changes to configuration when multicast or promiscuous is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when device changes address list filtering.
 *	If driver handles unicast address filtering, it should set
 *	IFF_UNICAST_FLT to its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	mac address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called when a user requests an ioctl which can't be handled by
 *	the generic interface code. If not defined, ioctls return
 *	a not supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set network device bus interface parameters. This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device. If not defined, any request to change MTU will
 *	return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
 *                      struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it. The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *	SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
 *			  int max_tx_rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *			    int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *			  struct nlattr *port[]);
 *
 *      Enable or disable the VF ability to query its RSS Redirection Table and
 *      Hash Key. This is needed since on some devices the VF shares this
 *      information with the PF and querying it may introduce a theoretical
 *      security risk.
 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
 * 	Called to setup 'tc' number of traffic classes in the net device. This
 * 	is always called from the stack with the rtnl lock held and netif tx
 * 	queues stopped. This allows the netdevice to perform queue management
 * 	safely.
 *
 *	Fiber Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to start using LLD for FCoE
 *	so the underlying device can perform whatever needed configuration or
 *	initialization to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
 *	so the underlying device can perform whatever needed clean-ups to
 *	stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *			     struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Initiator wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev,  u16 xid);
 *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *	indicated by the FC exchange id 'xid', so the underlying device can
 *	clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *			      struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Target wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *			       struct netdev_fcoe_hbainfo *hbainfo);
 *	Called when the FCoE Protocol stack wants information on the underlying
 *	device. This information is utilized by the FCoE protocol stack to
 *	register attributes with Fiber Channel management service as per the
 *	FC-GS Fabric Device Management Information(FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override default World Wide
 *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
 *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
 *	protocol stack to use.
 *
 *	RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *			    u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS.  rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 *	Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release previously enslaved netdev.
 *
 *      Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *		netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. Passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 vid, u16 flags)
 *	Adds an FDB entry to dev for addr.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 vid)
 *	Deletes the FDB entry from dev corresponding to addr.
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *		       struct net_device *dev, struct net_device *filter_dev,
 *		       int idx)
 *	Used to add FDB entries to dump requests. Implementers should add
 *	entries to skb and update idx with the number of entries.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
 *			     u16 flags)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *			     struct net_device *dev, u32 filter_mask,
 *			     int nlflags)
 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
 *			     u16 flags);
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *	Called to change device carrier. Soft-devices (like dummy, team, etc)
 *	which do not represent real hardware may define this to allow their
 *	userspace components to manage their virtual carrier state. Devices
 *	that determine carrier state from physical hardware properties (eg
 *	network cables) or protocol-dependent mechanisms (eg
 *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 *
 * int (*ndo_get_phys_port_id)(struct net_device *dev,
 *			       struct netdev_phys_item_id *ppid);
 *	Called to get ID of physical port of this device. If driver does
 *	not implement this, it is assumed that the hw is not able to have
 *	multiple net devices on single physical port.
 *
 * void (*ndo_add_vxlan_port)(struct  net_device *dev,
 *			      sa_family_t sa_family, __be16 port);
 *	Called by vxlan to notify a driver about the UDP port and socket
 *	address family that vxlan is listening to. It is called only when
 *	a new port starts listening. The operation is protected by the
 *	vxlan_net->sock_lock.
 *
 * void (*ndo_del_vxlan_port)(struct  net_device *dev,
 *			      sa_family_t sa_family, __be16 port);
 *	Called by vxlan to notify the driver about a UDP port and socket
 *	address family that vxlan is not listening to anymore. The operation
 *	is protected by the vxlan_net->sock_lock.
 *
 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
 *				 struct net_device *dev)
 *	Called by upper layer devices to accelerate switching or other
 *	station functionality into hardware. 'pdev' is the lowerdev
 *	to use for the offload and 'dev' is the net device that will
 *	back the offload. Returns a pointer to the private structure
 *	the upper layer will maintain.
 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
 *	Called by upper layer device to delete the station created
 *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
 *	the station and priv is the structure returned by the add
 *	operation.
 * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
 *				      struct net_device *dev,
 *				      void *priv);
 *	Callback to use for xmit over the accelerated station. This
 *	is used in place of ndo_start_xmit on accelerated net
 *	devices.
 * netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
 *					    struct net_device *dev,
 *					    netdev_features_t features);
 *	Called by core transmit path to determine if device is capable of
 *	performing offload operations on a given packet. This is to give
 *	the device an opportunity to implement any restrictions that cannot
 *	be otherwise expressed by feature flags. The check is called with
 *	the set of features that the stack has calculated and it returns
 *	those the driver believes to be appropriate.
 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
 *			     int queue_index, u32 maxrate);
 *	Called when a user wants to set a max-rate limitation of specific
 *	TX queue.
 * int (*ndo_get_iflink)(const struct net_device *dev);
 *	Called to get the iflink value of this device.
 * int (*ndo_change_proto_down)(struct net_device *dev,
 *				  bool proto_down);
 *	This function is used to pass protocol port error state information
 *	to the switch driver. The switch driver can react to the proto_down
 *      by doing a phys down on the associated switch port.
 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
 *	This function is used to get egress tunnel information for given skb.
 *	This is useful for retrieving outer tunnel header parameters while
 *	sampling packet.
 *
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
						   struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb,
						    void *accel_priv,
						    select_queue_fallback_t fallback);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
					        struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
					          struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev);

	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
						     struct rtnl_link_stats64 *storage);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
						        __be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void                    (*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	int			(*ndo_busy_poll)(struct napi_struct *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan, u8 qos);
	int			(*ndo_set_vf_rate)(struct net_device *dev,
						   int vf, int min_tx_rate,
						   int max_tx_rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_set_vf_trust)(struct net_device *dev,
						    int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_get_vf_stats)(struct net_device *dev,
						    int vf,
						    struct ifla_vf_stats
						    *vf_stats);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_set_vf_rss_query_en)(
						   struct net_device *dev,
						   int vf, bool setting);
	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       u16 flags);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						struct net_device *filter_dev,
						int idx);

	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask,
						      int nlflags);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_item_id *ppid);
	int			(*ndo_get_phys_port_name)(struct net_device *dev,
							  char *name, size_t len);
	void			(*ndo_add_vxlan_port)(struct  net_device *dev,
						      sa_family_t sa_family,
						      __be16 port);
	void			(*ndo_del_vxlan_port)(struct  net_device *dev,
						      sa_family_t sa_family,
						      __be16 port);

	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
							struct net_device *dev);
	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
							void *priv);

	netdev_tx_t		(*ndo_dfwd_start_xmit) (struct sk_buff *skb,
							struct net_device *dev,
							void *priv);
	int			(*ndo_get_lock_subclass)(struct net_device *dev);
	netdev_features_t	(*ndo_features_check) (struct sk_buff *skb,
						       struct net_device *dev,
						       netdev_features_t features);
	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
						      int queue_index,
						      u32 maxrate);
	int			(*ndo_get_iflink)(const struct net_device *dev);
	int			(*ndo_change_proto_down)(struct net_device *dev,
							 bool proto_down);
	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
						       struct sk_buff *skb);
};
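/* A minimal sketch (not part of this header) of wiring a driver's callbacks
 * into struct net_device_ops; only ndo_start_xmit is required, everything
 * else may stay NULL. The example_* functions are hypothetical;
 * eth_validate_addr()/eth_mac_addr() are the generic helpers from
 * <linux/etherdevice.h>.
 */
#if 0	/* example only */
static const struct net_device_ops example_netdev_ops = {
	.ndo_open		= example_open,
	.ndo_stop		= example_stop,
	.ndo_start_xmit		= example_start_xmit,
	.ndo_set_rx_mode	= example_set_rx_mode,
	.ndo_get_stats64	= example_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

/* typically assigned in the driver's probe/setup path:
 *	dev->netdev_ops = &example_netdev_ops;
 */
#endif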

/**
 * enum net_device_priv_flags - &struct net_device priv_flags
 *
 * These are the &struct net_device flags, they are only set internally
 * by drivers and used in the kernel. These flags are invisible to
 * userspace, this means that the order of these flags can change
 * during any kernel release.
 *
 * You should have a pretty good reason to be extending these flags.
 *
 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
 * @IFF_EBRIDGE: Ethernet bridging device
 * @IFF_BONDING: bonding master or slave
 * @IFF_ISATAP: ISATAP interface (RFC4214)
 * @IFF_WAN_HDLC: WAN HDLC device
 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
 *	release skb->dst
 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
 * @IFF_MACVLAN_PORT: device used as macvlan port
 * @IFF_BRIDGE_PORT: device used as bridge port
 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
 * @IFF_UNICAST_FLT: Supports unicast filtering
 * @IFF_TEAM_PORT: device used as team port
 * @IFF_SUPP_NOFCS: device supports sending custom FCS
 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
 *	change when it's running
 * @IFF_MACVLAN: Macvlan device
 * @IFF_L3MDEV_MASTER: device is an L3 master device
 * @IFF_NO_QUEUE: device can run without qdisc attached
 * @IFF_OPENVSWITCH: device is a Open vSwitch master
 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
 */
enum netdev_priv_flags {
	IFF_802_1Q_VLAN			= 1<<0,
	IFF_EBRIDGE			= 1<<1,
	IFF_BONDING			= 1<<2,
	IFF_ISATAP			= 1<<3,
	IFF_WAN_HDLC			= 1<<4,
	IFF_XMIT_DST_RELEASE		= 1<<5,
	IFF_DONT_BRIDGE			= 1<<6,
	IFF_DISABLE_NETPOLL		= 1<<7,
	IFF_MACVLAN_PORT		= 1<<8,
	IFF_BRIDGE_PORT			= 1<<9,
	IFF_OVS_DATAPATH		= 1<<10,
	IFF_TX_SKB_SHARING		= 1<<11,
	IFF_UNICAST_FLT			= 1<<12,
	IFF_TEAM_PORT			= 1<<13,
	IFF_SUPP_NOFCS			= 1<<14,
	IFF_LIVE_ADDR_CHANGE		= 1<<15,
	IFF_MACVLAN			= 1<<16,
	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,
	IFF_IPVLAN_MASTER		= 1<<18,
	IFF_IPVLAN_SLAVE		= 1<<19,
	IFF_L3MDEV_MASTER		= 1<<20,
	IFF_NO_QUEUE			= 1<<21,
	IFF_OPENVSWITCH			= 1<<22,
	IFF_L3MDEV_SLAVE		= 1<<23,
};

#define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
#define IFF_EBRIDGE			IFF_EBRIDGE
#define IFF_BONDING			IFF_BONDING
#define IFF_ISATAP			IFF_ISATAP
#define IFF_WAN_HDLC			IFF_WAN_HDLC
#define IFF_XMIT_DST_RELEASE		IFF_XMIT_DST_RELEASE
#define IFF_DONT_BRIDGE			IFF_DONT_BRIDGE
#define IFF_DISABLE_NETPOLL		IFF_DISABLE_NETPOLL
#define IFF_MACVLAN_PORT		IFF_MACVLAN_PORT
#define IFF_BRIDGE_PORT			IFF_BRIDGE_PORT
#define IFF_OVS_DATAPATH		IFF_OVS_DATAPATH
#define IFF_TX_SKB_SHARING		IFF_TX_SKB_SHARING
#define IFF_UNICAST_FLT			IFF_UNICAST_FLT
#define IFF_TEAM_PORT			IFF_TEAM_PORT
#define IFF_SUPP_NOFCS			IFF_SUPP_NOFCS
#define IFF_LIVE_ADDR_CHANGE		IFF_LIVE_ADDR_CHANGE
#define IFF_MACVLAN			IFF_MACVLAN
#define IFF_XMIT_DST_RELEASE_PERM	IFF_XMIT_DST_RELEASE_PERM
#define IFF_IPVLAN_MASTER		IFF_IPVLAN_MASTER
#define IFF_IPVLAN_SLAVE		IFF_IPVLAN_SLAVE
#define IFF_L3MDEV_MASTER		IFF_L3MDEV_MASTER
#define IFF_NO_QUEUE			IFF_NO_QUEUE
#define IFF_OPENVSWITCH			IFF_OPENVSWITCH
#define IFF_L3MDEV_SLAVE		IFF_L3MDEV_SLAVE

/**
 *	struct net_device - The DEVICE structure.
 *		Actually, this whole structure is a big mistake.  It mixes I/O
 *		data with strictly "high-level" data, and it has to know about
 *		almost every data structure used in the INET module.
 *
 *	@name:	This is the first field of the "visible" part of this structure
 *		(i.e. as seen by users in the "Space.c" file).  It is the name
 *	 	of the interface.
 *
 *	@name_hlist: 	Device name hash chain, please keep it close to name[]
 *	@ifalias:	SNMP alias
 *	@mem_end:	Shared memory end
 *	@mem_start:	Shared memory start
 *	@base_addr:	Device I/O address
 *	@irq:		Device IRQ number
 *
 *	@carrier_changes:	Stats to monitor carrier on<->off transitions
 *
 *	@state:		Generic network queuing layer state, see netdev_state_t
 *	@dev_list:	The global list of network devices
 *	@napi_list:	List entry, that is used for polling napi devices
 *	@unreg_list:	List entry, that is used, when we are unregistering the
 *			device, see the function unregister_netdev
 *	@close_list:	List entry, that is used, when we are closing the device
 *
 *	@adj_list:	Directly linked devices, like slaves for bonding
 *	@all_adj_list:	All linked devices, *including* neighbours
 *	@features:	Currently active device features
 *	@hw_features:	User-changeable features
 *
 *	@wanted_features:	User-requested features
 *	@vlan_features:		Mask of features inheritable by VLAN devices
 *
 *	@hw_enc_features:	Mask of features inherited by encapsulating devices
 *				This field indicates what encapsulation
 *				offloads the hardware is capable of doing,
 *				and drivers will need to set them appropriately.
 *
 *	@mpls_features:	Mask of features inheritable by MPLS
 *
 *	@ifindex:	interface index
 *	@group:		The group, that the device belongs to
 *
 *	@stats:		Statistics struct, which was left as a legacy, use
 *			rtnl_link_stats64 instead
 *
 *	@rx_dropped:	Dropped packets by core network,
 *			do not use this in drivers
 *	@tx_dropped:	Dropped packets by core network,
 *			do not use this in drivers
 *
 *	@wireless_handlers:	List of functions to handle Wireless Extensions,
 *				instead of ioctl,
 *				see <net/iw_handler.h> for details.
 *	@wireless_data:	Instance data managed by the core of wireless extensions
 *
 *	@netdev_ops:	Includes several pointers to callbacks,
 *			if one wants to override the ndo_*() functions
 *	@ethtool_ops:	Management operations
 *	@header_ops:	Includes callbacks for creating,parsing,caching,etc
 *			of Layer 2 headers.
 *
 *	@flags:		Interface flags (a la BSD)
 *	@priv_flags:	Like 'flags' but invisible to userspace,
 *			see if.h for the definitions
 *	@gflags:	Global flags ( kept as legacy )
 *	@padded:	How much padding added by alloc_netdev()
 *	@operstate:	RFC2863 operstate
 *	@link_mode:	Mapping policy to operstate
 *	@if_port:	Selectable AUI, TP, ...
 *	@dma:		DMA channel
 *	@mtu:		Interface MTU value
 *	@type:		Interface hardware type
 *	@hard_header_len: Hardware header length
 *
 *	@needed_headroom: Extra headroom the hardware may need, but not in all
 *			  cases can this be guaranteed
 *	@needed_tailroom: Extra tailroom the hardware may need, but not in all
 *			  cases can this be guaranteed. Some cases also use
 *			  LL_MAX_HEADER instead to allocate the skb
 *
 *	interface address info:
 *
 * 	@perm_addr:		Permanent hw address
 * 	@addr_assign_type:	Hw address assignment type
 * 	@addr_len:		Hardware address length
 * 	@neigh_priv_len:	Used in neigh_alloc(),
 * 				initialized only in atm/clip.c
 * 	@dev_id:		Used to differentiate devices that share
 * 				the same link layer address
 * 	@dev_port:		Used to differentiate devices that share
 * 				the same function
 *	@addr_list_lock:	XXX: need comments on this one
 *	@uc_promisc:		Counter, that indicates, that promiscuous mode
 *				has been enabled due to the need to listen to
 *				additional unicast addresses in a device that
 *				does not implement ndo_set_rx_mode()
 *	@uc:			unicast mac addresses
 *	@mc:			multicast mac addresses
 *	@dev_addrs:		list of device hw addresses
 *	@queues_kset:		Group of all Kobjects in the Tx and RX queues
 *	@promiscuity:		Number of times, the NIC is told to work in
 *				Promiscuous mode, if it becomes 0 the NIC will
 *				exit from working in Promiscuous mode
 *	@allmulti:		Counter, enables or disables allmulticast mode
 *
 *	@vlan_info:	VLAN info
 *	@dsa_ptr:	dsa specific data
 *	@tipc_ptr:	TIPC specific data
 *	@atalk_ptr:	AppleTalk link
 *	@ip_ptr:	IPv4 specific data
 *	@dn_ptr:	DECnet specific data
 *	@ip6_ptr:	IPv6 specific data
 *	@ax25_ptr:	AX.25 specific data
 *	@ieee80211_ptr:	IEEE 802.11 specific data, assign before registering
 *
 *	@last_rx:	Time of last Rx
 *	@dev_addr:	Hw address (before bcast,
 *			because most packets are unicast)
 *
 *	@_rx:			Array of RX queues
 *	@num_rx_queues:		Number of RX queues
 *				allocated at register_netdev() time
 *	@real_num_rx_queues: 	Number of RX queues currently active in device
 *
 *	@rx_handler:		handler for received packets
 *	@rx_handler_data: 	XXX: need comments on this one
 *	@ingress_queue:		XXX: need comments on this one
 *	@broadcast:		hw bcast address
 *
 *	@rx_cpu_rmap:	CPU reverse-mapping for RX completion interrupts,
 *			indexed by RX queue number. Assigned by driver.
 *			This must only be set if the ndo_rx_flow_steer
 *			operation is defined
 *	@index_hlist:		Device index hash chain
 *
 *	@_tx:			Array of TX queues
 *	@num_tx_queues:		Number of TX queues allocated at alloc_netdev_mq() time
 *	@real_num_tx_queues: 	Number of TX queues currently active in device
 *	@qdisc:			Root qdisc from userspace point of view
 *	@tx_queue_len:		Max frames per queue allowed
 *	@tx_global_lock: 	XXX: need comments on this one
 *
 *	@xps_maps:	XXX: need comments on this one
 *
 *	@offload_fwd_mark:	Offload device fwding mark
 *
 *	@trans_start:		Time (in jiffies) of last Tx
 *	@watchdog_timeo:	Represents the timeout that is used by
 *				the watchdog ( see dev_watchdog() )
 *	@watchdog_timer:	List of timers
 *
 *	@pcpu_refcnt:		Number of references to this device
 *	@todo_list:		Delayed register/unregister
 *	@link_watch_list:	XXX: need comments on this one
 *
 *	@reg_state:		Register/unregister state machine
 *	@dismantle:		Device is going to be freed
 *	@rtnl_link_state:	This enum represents the phases of creating
 *				a new link
 *
 *	@destructor:		Called from unregister,
 *				can be used to call free_netdev
 *	@npinfo:		XXX: need comments on this one
 * 	@nd_net:		Network namespace this network device is inside
 *
 * 	@ml_priv:	Mid-layer private
 * 	@lstats:	Loopback statistics
 * 	@tstats:	Tunnel statistics
 * 	@dstats:	Dummy statistics
 * 	@vstats:	Virtual ethernet statistics
 *
 *	@garp_port:	GARP
 *	@mrp_port:	MRP
 *
 *	@dev:		Class/net/name entry
 *	@sysfs_groups:	Space for optional device, statistics and wireless
 *			sysfs groups
 *
 *	@sysfs_rx_queue_group:	Space for optional per-rx queue attributes
 *	@rtnl_link_ops:	Rtnl_link_ops
 *
 *	@gso_max_size:	Maximum size of generic segmentation offload
 *	@gso_max_segs:	Maximum number of segments that can be passed to the
 *			NIC for GSO
 *	@gso_min_segs:	Minimum number of segments that can be passed to the
 *			NIC for GSO
 *
 *	@dcbnl_ops:	Data Center Bridging netlink ops
 *	@num_tc:	Number of traffic classes in the net device
 *	@tc_to_txq:	XXX: need comments on this one
 *	@prio_tc_map:	XXX: need comments on this one
 *
 *	@fcoe_ddp_xid:	Max exchange id for FCoE LRO by ddp
 *
 *	@priomap:	XXX: need comments on this one
 *	@phydev:	Physical device may attach itself
 *			for hardware timestamping
 *
 *	@qdisc_tx_busylock:	XXX: need comments on this one
 *
 *	@proto_down:	protocol port state information can be sent to the
 *			switch driver and used to set the phys state of the
 *			switch port.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device {
	char			name[IFNAMSIZ];
	struct hlist_node	name_hlist;
	char 			*ifalias;
	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;
	unsigned long		mem_start;
	unsigned long		base_addr;
	int			irq;

	atomic_t		carrier_changes;

	/*
	 *	Some hardware also needs these fields (state,dev_list,
	 *	napi_list,unreg_list,close_list) but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;
	struct list_head	close_list;
	struct list_head	ptype_all;
	struct list_head	ptype_specific;

	struct {
		struct list_head upper;
		struct list_head lower;
	} adj_list;

	struct {
		struct list_head upper;
		struct list_head lower;
	} all_adj_list;

	netdev_features_t	features;
	netdev_features_t	hw_features;
	netdev_features_t	wanted_features;
	netdev_features_t	vlan_features;
	netdev_features_t	hw_enc_features;
	netdev_features_t	mpls_features;

	int			ifindex;
	int			group;

	struct net_device_stats	stats;

	atomic_long_t		rx_dropped;
	atomic_long_t		tx_dropped;

#ifdef CONFIG_WIRELESS_EXT
	const struct iw_handler_def *	wireless_handlers;
	struct iw_public_data *	wireless_data;
#endif
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;
#ifdef CONFIG_NET_SWITCHDEV
	const struct switchdev_ops *switchdev_ops;
#endif
#ifdef CONFIG_NET_L3_MASTER_DEV
	const struct l3mdev_ops	*l3mdev_ops;
#endif

	const struct header_ops *header_ops;

	unsigned int		flags;
	unsigned int		priv_flags;

	unsigned short		gflags;
	unsigned short		padded;

	unsigned char		operstate;
	unsigned char		link_mode;

	unsigned char		if_port;
	unsigned char		dma;

	unsigned int		mtu;
	unsigned short		type;
	unsigned short		hard_header_len;

	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN];
	unsigned char		addr_assign_type;
	unsigned char		addr_len;
	unsigned short		neigh_priv_len;
	unsigned short          dev_id;
	unsigned short          dev_port;
	spinlock_t		addr_list_lock;
	unsigned char		name_assign_type;
	bool			uc_promisc;
	struct netdev_hw_addr_list	uc;
	struct netdev_hw_addr_list	mc;
	struct netdev_hw_addr_list	dev_addrs;

#ifdef CONFIG_SYSFS
	struct kset		*queues_kset;
#endif
	unsigned int		promiscuity;
	unsigned int		allmulti;


	/* Protocol specific pointers */

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu	*vlan_info;
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_switch_tree	*dsa_ptr;
#endif
#if IS_ENABLED(CONFIG_TIPC)
	struct tipc_bearer __rcu *tipc_ptr;
#endif
	void 			*atalk_ptr;
	struct in_device __rcu	*ip_ptr;
	struct dn_dev __rcu     *dn_ptr;
	struct inet6_dev __rcu	*ip6_ptr;
	void			*ax25_ptr;
	struct wireless_dev	*ieee80211_ptr;
	struct wpan_dev		*ieee802154_ptr;
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
	struct mpls_dev __rcu	*mpls_ptr;
#endif

/*
 * Cache lines mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;

	/* Interface address info used in eth_type_trans() */
	unsigned char		*dev_addr;


#ifdef CONFIG_SYSFS
	struct netdev_rx_queue	*_rx;

	unsigned int		num_rx_queues;
	unsigned int		real_num_rx_queues;

#endif

	unsigned long		gro_flush_timeout;
	rx_handler_func_t __rcu	*rx_handler;
	void __rcu		*rx_handler_data;

#ifdef CONFIG_NET_CLS_ACT
	struct tcf_proto __rcu  *ingress_cl_list;
#endif
	struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
	struct list_head	nf_hooks_ingress;
#endif

	unsigned char		broadcast[MAX_ADDR_LEN];
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap		*rx_cpu_rmap;
#endif
	struct hlist_node	index_hlist;

/*
 * Cache lines mostly used on transmit path
 */
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
	unsigned int		num_tx_queues;
	unsigned int		real_num_tx_queues;
	struct Qdisc		*qdisc;
	unsigned long		tx_queue_len;
	spinlock_t		tx_global_lock;
	int			watchdog_timeo;

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps;
#endif

#ifdef CONFIG_NET_SWITCHDEV
	u32			offload_fwd_mark;
#endif

	/* These may be needed for future network-power-down code. */

	/*
	 * trans_start here is expensive for high speed devices on SMP,
	 * please use netdev_queue->trans_start instead.
	 */
	unsigned long		trans_start;

	struct timer_list	watchdog_timer;

	int __percpu		*pcpu_refcnt;
	struct list_head	todo_list;

	struct list_head	link_watch_list;

	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:8;

	bool dismantle;

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;

	void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info __rcu	*npinfo;
#endif

	possible_net_t			nd_net;

	/* mid-layer private */
	union {
		void					*ml_priv;
		struct pcpu_lstats __percpu		*lstats;
		struct pcpu_sw_netstats __percpu	*tstats;
		struct pcpu_dstats __percpu		*dstats;
		struct pcpu_vstats __percpu		*vstats;
	};

	struct garp_port __rcu	*garp_port;
	struct mrp_port __rcu	*mrp_port;

	struct device	dev;
	const struct attribute_group *sysfs_groups[4];
	const struct attribute_group *sysfs_rx_queue_group;

	const struct rtnl_link_ops *rtnl_link_ops;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;
#define GSO_MAX_SEGS		65535
	u16			gso_max_segs;
	u16			gso_min_segs;
#ifdef CONFIG_DCB
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
	u8 num_tc;
	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
	u8 prio_tc_map[TC_BITMASK + 1];

#if IS_ENABLED(CONFIG_FCOE)
	unsigned int		fcoe_ddp_xid;
#endif
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
	struct netprio_map __rcu *priomap;
#endif
	struct phy_device *phydev;
	struct lock_class_key *qdisc_tx_busylock;
	bool proto_down;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define	NETDEV_ALIGN		32

static inline
int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
{
	return dev->prio_tc_map[prio & TC_BITMASK];
}

static inline
int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
	return 0;
}

static inline
void netdev_reset_tc(struct net_device *dev)
{
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}

static inline
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}

static inline
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	dev->num_tc = num_tc;
	return 0;
}

static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}

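/*
 * Illustrative sketch only (not kernel code): a hypothetical ndo_setup_tc()
 * implementation could use the helpers above roughly like this; the queue
 * counts and priorities are made up for the example.
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);	(TC 0 -> queues 0..3)
 *	netdev_set_tc_queue(dev, 1, 4, 4);	(TC 1 -> queues 4..7)
 *	netdev_set_prio_tc_map(dev, 0, 0);
 *	netdev_set_prio_tc_map(dev, 1, 1);
 */
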
static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
						    const struct sk_buff *skb)
{
	return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}

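/*
 * Illustrative sketch only ("mydrv" names are invented): netdev_for_each_tx_queue()
 * is typically used to apply the same setup to every TX queue, e.g.:
 *
 *	static void mydrv_init_one_txq(struct net_device *dev,
 *				       struct netdev_queue *txq, void *arg)
 *	{
 *		netdev_tx_reset_queue(txq);
 *	}
 *
 *	netdev_for_each_tx_queue(dev, mydrv_init_one_txq, NULL);
 */
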
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv);

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
	write_pnet(&dev->nd_net, net);
}

static inline bool netdev_uses_dsa(struct net_device *dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	if (dev->dsa_ptr != NULL)
		return dsa_uses_tagged_protocol(dev->dsa_ptr);
#endif
	return false;
}

/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}

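/*
 * Usage sketch (illustrative only; "mydrv_priv" is a made-up type): the
 * private area returned by netdev_priv() is the one reserved by
 * alloc_netdev()/alloc_etherdev():
 *
 *	struct mydrv_priv {
 *		struct napi_struct	napi;
 *	};
 *
 *	dev = alloc_etherdev(sizeof(struct mydrv_priv));
 *	priv = netdev_priv(dev);
 */
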
/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types. For
 *  example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))

/* Default NAPI poll() weight
 * Device drivers are strongly advised not to use a bigger value.
 */
#define NAPI_POLL_WEIGHT 64

/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);
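
/*
 * Usage sketch (illustrative only; the poll callback and "mydrv" helpers
 * below are hypothetical):
 *
 *	netif_napi_add(dev, &priv->napi, mydrv_poll, NAPI_POLL_WEIGHT);
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = mydrv_clean_rx_ring(napi, budget);
 *
 *		if (done < budget)
 *			napi_complete(napi);
 *		return done;
 *	}
 */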

/**
 *	netif_tx_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * This variant of netif_napi_add() should be used from drivers using NAPI
 * to exclusively poll a TX queue.
 * This avoids adding it to napi_hash[], and thus polluting that hash table.
 */
static inline void netif_tx_napi_add(struct net_device *dev,
				     struct napi_struct *napi,
				     int (*poll)(struct napi_struct *, int),
				     int weight)
{
	set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
	netif_napi_add(dev, napi, poll, weight);
}

/**
 *  netif_napi_del - remove a napi context
 *  @napi: napi context
 *
 *  netif_napi_del() removes a napi context from the network device napi list
 */
void netif_napi_del(struct napi_struct *napi);

struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void *frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int data_offset;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	u16	flush;

	/* Save the IP ID here and check when we get to the transport layer */
	u16	flush_id;

	/* Number of segments aggregated. */
	u16	count;

	/* Start offset for remote checksum offload */
	u16	gro_remcsum_start;

	/* jiffies when first packet was created/queued */
	unsigned long age;

	/* Used in ipv6_gro_receive() and foo-over-udp */
	u16	proto;

	/* This is non-zero if the packet may be of the same flow. */
	u8	same_flow:1;

	/* Used in udp_gro_receive */
	u8	udp_mark:1;

	/* GRO checksum is valid */
	u8	csum_valid:1;

	/* Number of checksums via CHECKSUM_UNNECESSARY */
	u8	csum_cnt:3;

	/* Free the skb? */
	u8	free:2;
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2

	/* Used in foo-over-udp, set in udp[46]_gro_receive */
	u8	is_ipv6:1;

	/* 7 bit hole */

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;

	/* used in skb_gro_receive() slow path */
	struct sk_buff *last;
2026 2027 2028
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	bool			(*id_match)(struct packet_type *ptype,
					    struct sock *sk);
	void			*af_packet_priv;
	struct list_head	list;
};

struct offload_callbacks {
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
						 struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
};

struct packet_offload {
	__be16			 type;	/* This is really htons(ether_type). */
	u16			 priority;
	struct offload_callbacks callbacks;
	struct list_head	 list;
};

struct udp_offload;

struct udp_offload_callbacks {
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
						 struct sk_buff *skb,
						 struct udp_offload *uoff);
	int			(*gro_complete)(struct sk_buff *skb,
						int nhoff,
						struct udp_offload *uoff);
};

struct udp_offload {
	__be16			 port;
	u8			 ipproto;
	struct udp_offload_callbacks callbacks;
};

/* often modified stats are per cpu, other are shared (netdev->stats) */
struct pcpu_sw_netstats {
	u64     rx_packets;
	u64     rx_bytes;
	u64     tx_packets;
	u64     tx_bytes;
	struct u64_stats_sync   syncp;
};

#define __netdev_alloc_pcpu_stats(type, gfp)				\
({									\
	typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
	if (pcpu_stats)	{						\
		int __cpu;						\
		for_each_possible_cpu(__cpu) {				\
			typeof(type) *stat;				\
			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
			u64_stats_init(&stat->syncp);			\
		}							\
	}								\
	pcpu_stats;							\
})

#define netdev_alloc_pcpu_stats(type)					\
	__netdev_alloc_pcpu_stats(type, GFP_KERNEL)
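
/*
 * Allocation/update sketch (illustrative only): a driver using
 * pcpu_sw_netstats would typically do something like
 *
 *	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 *	if (!dev->tstats)
 *		return -ENOMEM;
 *
 * and then, in the hot path,
 *
 *	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
 *
 *	u64_stats_update_begin(&tstats->syncp);
 *	tstats->rx_packets++;
 *	tstats->rx_bytes += skb->len;
 *	u64_stats_update_end(&tstats->syncp);
 */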

#include <linux/notifier.h>

/* netdevice notifier chain. Please remember to update the rtnetlink
 * notification exclusion list in rtnetlink_event() when adding new
 * types.
 */
#define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
#define NETDEV_DOWN	0x0002
#define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
				   detected a hardware crash and restarted
				   - we can use this eg to kick tcp sessions
				   once done */
#define NETDEV_CHANGE	0x0004	/* Notify device state change */
#define NETDEV_REGISTER 0x0005
#define NETDEV_UNREGISTER	0x0006
#define NETDEV_CHANGEMTU	0x0007 /* notify after mtu change happened */
#define NETDEV_CHANGEADDR	0x0008
#define NETDEV_GOING_DOWN	0x0009
#define NETDEV_CHANGENAME	0x000A
#define NETDEV_FEAT_CHANGE	0x000B
#define NETDEV_BONDING_FAILOVER 0x000C
#define NETDEV_PRE_UP		0x000D
#define NETDEV_PRE_TYPE_CHANGE	0x000E
#define NETDEV_POST_TYPE_CHANGE	0x000F
#define NETDEV_POST_INIT	0x0010
#define NETDEV_UNREGISTER_FINAL 0x0011
#define NETDEV_RELEASE		0x0012
#define NETDEV_NOTIFY_PEERS	0x0013
#define NETDEV_JOIN		0x0014
#define NETDEV_CHANGEUPPER	0x0015
#define NETDEV_RESEND_IGMP	0x0016
#define NETDEV_PRECHANGEMTU	0x0017 /* notify before mtu change happened */
#define NETDEV_CHANGEINFODATA	0x0018
#define NETDEV_BONDING_INFO	0x0019
#define NETDEV_PRECHANGEUPPER	0x001A

int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);

struct netdev_notifier_info {
	struct net_device *dev;
};

struct netdev_notifier_change_info {
	struct netdev_notifier_info info; /* must be first */
	unsigned int flags_changed;
};

struct netdev_notifier_changeupper_info {
	struct netdev_notifier_info info; /* must be first */
	struct net_device *upper_dev; /* new upper dev */
	bool master; /* is upper dev master */
	bool linking; /* is the notification for link or unlink */
};

static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
					     struct net_device *dev)
{
	info->dev = dev;
}

static inline struct net_device *
netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
{
	return info->dev;
}

int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
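
/*
 * Registration sketch (illustrative only; "mydrv" names are invented):
 *
 *	static int mydrv_netdev_event(struct notifier_block *nb,
 *				      unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *		case NETDEV_DOWN:
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block mydrv_netdev_nb = {
 *		.notifier_call = mydrv_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&mydrv_netdev_nb);
 */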


extern rwlock_t				dev_base_lock;		/* Device list lock */

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave)	\
		for_each_netdev_rcu(&init_net, slave)	\
			if (netdev_master_upper_dev_get_rcu(slave) == (bond))
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
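
/*
 * Iteration sketch (illustrative only): walking all devices in a namespace
 * under RCU protection:
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, dev) {
 *		if (netif_running(dev))
 *			...;
 *	}
 *	rcu_read_unlock();
 */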

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

static inline struct net_device *first_net_device_rcu(struct net *net)
{
	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));

	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

int netdev_boot_setup_check(struct net_device *dev);
unsigned long netdev_boot_base(const char *prefix, int unit);
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *hwaddr);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
void dev_add_pack(struct packet_type *pt);
void dev_remove_pack(struct packet_type *pt);
void __dev_remove_pack(struct packet_type *pt);
void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);

int dev_get_iflink(const struct net_device *dev);
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
				      unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
int dev_open(struct net_device *dev);
int dev_close(struct net_device *dev);
int dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
int dev_queue_xmit(struct sk_buff *skb);
int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}

int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
void netdev_freemem(struct net_device *dev);
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);

DECLARE_PER_CPU(int, xmit_recursion);
static inline int dev_recursion_level(void)
{
	return this_cpu_read(xmit_recursion);
}

struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
	return skb->data + offset;
}
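
/*
 * Access-pattern sketch (illustrative only): a gro_receive handler usually
 * tries the frag0 fast path first and falls back to the slow path:
 *
 *	off  = skb_gro_offset(skb);
 *	hlen = off + sizeof(*hdr);
 *	hdr  = skb_gro_header_fast(skb, off);
 *	if (skb_gro_header_hard(skb, hlen)) {
 *		hdr = skb_gro_header_slow(skb, hlen, off);
 *		if (unlikely(!hdr))
 *			goto out;
 *	}
 */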

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					const void *start, unsigned int len)
{
	if (NAPI_GRO_CB(skb)->csum_valid)
		NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
						  csum_partial(start, len, 0));
}

/* GRO checksum functions. These are logical equivalents of the normal
 * checksum functions (in skbuff.h) except that they operate on the GRO
 * offsets and fields in sk_buff.
 */

__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);

static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}

static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
						      bool zero_okay,
						      __sum16 check)
{
	return ((skb->ip_summed != CHECKSUM_PARTIAL ||
		skb_checksum_start_offset(skb) <
		 skb_gro_offset(skb)) &&
		!skb_at_gro_remcsum_start(skb) &&
		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		(!zero_okay || check));
}

static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
							   __wsum psum)
{
	if (NAPI_GRO_CB(skb)->csum_valid &&
	    !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
		return 0;

	NAPI_GRO_CB(skb)->csum = psum;

	return __skb_gro_checksum_complete(skb);
}

static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
		/* Consume a checksum from CHECKSUM_UNNECESSARY */
		NAPI_GRO_CB(skb)->csum_cnt--;
	} else {
		/* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
		 * verified a new top level checksum or an encapsulated one
		 * during GRO. This saves work if we fallback to normal path.
		 */
		__skb_incr_checksum_unnecessary(skb);
	}
}

#define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
				    compute_pseudo)			\
({									\
	__sum16 __ret = 0;						\
	if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_gro_checksum_validate_complete(skb,	\
				compute_pseudo(skb, proto));		\
2385 2386 2387
	if (__ret)							\
		__skb_mark_checksum_bad(skb);				\
	else								\
T
	__ret;								\
})

#define skb_gro_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)

#define skb_gro_checksum_validate_zero_check(skb, proto, check,		\
					     compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)

#define skb_gro_checksum_simple_validate(skb)				\
	__skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)

static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		!NAPI_GRO_CB(skb)->csum_valid);
}

static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
					      __sum16 check, __wsum pseudo)
{
	NAPI_GRO_CB(skb)->csum = ~pseudo;
	NAPI_GRO_CB(skb)->csum_valid = 1;
}

#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo)	\
do {									\
	if (__skb_gro_checksum_convert_check(skb))			\
		__skb_gro_checksum_convert(skb, check,			\
					   compute_pseudo(skb, proto));	\
} while (0)

struct gro_remcsum {
	int offset;
	__wsum delta;
};

static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
{
	grc->offset = 0;
	grc->delta = 0;
}

static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
					    unsigned int off, size_t hdrlen,
					    int start, int offset,
					    struct gro_remcsum *grc,
					    bool nopartial)
{
	__wsum delta;
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

	if (!nopartial) {
		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
		return ptr;
	}

	ptr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, off + plen)) {
		ptr = skb_gro_header_slow(skb, off + plen, off);
		if (!ptr)
			return NULL;
	}

	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
			       start, offset);

	/* Adjust skb->csum since we changed the packet */
	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

	grc->offset = off + hdrlen + offset;
	grc->delta = delta;

	return ptr;
}

static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
					   struct gro_remcsum *grc)
{
	void *ptr;
	size_t plen = grc->offset + sizeof(u16);

	if (!grc->delta)
		return;

	ptr = skb_gro_header_fast(skb, grc->offset);
	if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
		ptr = skb_gro_header_slow(skb, plen, grc->offset);
		if (!ptr)
			return;
	}

	remcsum_unadjust((__sum16 *)ptr, grc->delta);
}

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned int len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

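/*
 * Usage sketch (illustrative only): building the link-layer header of an
 * outgoing IPv4 frame on an Ethernet-like device ("dest_mac" is assumed to
 * have been resolved already):
 *
 *	if (dev_hard_header(skb, dev, ETH_P_IP, dest_mac, dev->dev_addr,
 *			    skb->len) < 0)
 *		goto drop;
 */
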
static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}

typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}

#ifdef CONFIG_NET_FLOW_LIMIT
#define FLOW_LIMIT_HISTORY	(1 << 7)  /* must be ^2 and !overflow buckets */
struct sd_flow_limit {
	u64			count;
	unsigned int		num_buckets;
	unsigned int		history_head;
	u16			history[FLOW_LIMIT_HISTORY];
	u8			buckets[];
};

extern int netdev_flow_limit_table_len;
#endif /* CONFIG_NET_FLOW_LIMIT */

/*
 * Incoming packets are placed on per-cpu queues
 */
struct softnet_data {
	struct list_head	poll_list;
	struct sk_buff_head	process_queue;

	/* stats */
	unsigned int		processed;
	unsigned int		time_squeeze;
	unsigned int		cpu_collision;
	unsigned int		received_rps;
#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;
#endif
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit __rcu *flow_limit;
#endif
	struct Qdisc		*output_queue;
	struct Qdisc		**output_queue_tailp;
	struct sk_buff		*completion_queue;

#ifdef CONFIG_RPS
	/* Elements below can be accessed between CPUs for RPS */
	struct call_single_data	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		cpu;
	unsigned int		input_queue_head;
	unsigned int		input_queue_tail;
#endif
	unsigned int		dropped;
	struct sk_buff_head	input_pkt_queue;
	struct napi_struct	backlog;

};

static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	sd->input_queue_head++;
#endif
}

static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
#ifdef CONFIG_RPS
	*qtail = ++sd->input_queue_tail;
#endif
}

DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);

void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}

void netif_tx_wake_queue(struct netdev_queue *dev_queue);

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}

static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_stop_queue - stop the transmit queue
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
L
{
2656
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
L

2659
void netif_tx_stop_all_queues(struct net_device *dev);
2660

2661
static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
2662
{
2663
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2664 2665
}

2666 2667 2668 2669 2670 2671
/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
2672
static inline bool netif_queue_stopped(const struct net_device *dev)
L
2674
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
L

2677
static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
2678
{
2679 2680 2681
	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}

2682 2683
static inline bool
netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
2684 2685 2686 2687
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}

static inline bool
netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
}

/**
 *	netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
 *	@dev_queue: pointer to transmit queue
 *
 * BQL enabled drivers might use this helper in their ndo_start_xmit(),
 * to give appropriate hint to the cpu.
 */
static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	prefetchw(&dev_queue->dql.num_queued);
#endif
}

/**
 *	netdev_txq_bql_complete_prefetchw - prefetch bql data for write
 *	@dev_queue: pointer to transmit queue
 *
 * BQL enabled drivers might use this helper in their TX completion path,
 * to give appropriate hint to the cpu.
 */
static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	prefetchw(&dev_queue->dql.limit);
#endif
}

static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					unsigned int bytes)
{
#ifdef CONFIG_BQL
	dql_queued(&dev_queue->dql, bytes);

	if (likely(dql_avail(&dev_queue->dql) >= 0))
		return;

	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

	/*
	 * The XOFF flag must be set before checking the dql_avail below,
	 * because in netdev_tx_completed_queue we update the dql_completed
	 * before checking the XOFF flag.
	 */
	smp_mb();

	/* check again in case another CPU has just made room avail */
	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}

/**
 * 	netdev_sent_queue - report the number of bytes queued to hardware
 * 	@dev: network device
 * 	@bytes: number of bytes queued to the hardware device queue
 *
 * 	Report the number of bytes queued for sending/completion to the network
 * 	device hardware queue. @bytes should be a good approximation and should
 * 	exactly match netdev_completed_queue() @bytes
 */
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}

static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/*
	 * Without the memory barrier there is a small possibility that
	 * netdev_tx_sent_queue will miss the update and cause the queue to
	 * be stopped forever
	 */
	smp_mb();

	if (dql_avail(&dev_queue->dql) < 0)
		return;

	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
#endif
}

/**
 * 	netdev_completed_queue - report bytes and packets completed by device
 * 	@dev: network device
 * 	@pkts: actual number of packets sent over the medium
 * 	@bytes: actual number of bytes sent over the medium
 *
 * 	Report the number of bytes and packets transmitted by the network device
 * 	hardware queue over the physical medium, @bytes must exactly match the
 * 	@bytes amount passed to netdev_sent_queue()
 */
static inline void netdev_completed_queue(struct net_device *dev,
					  unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}

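/*
 * BQL pairing sketch (illustrative only; "mydrv" is a made-up driver):
 * netdev_sent_queue()/netdev_tx_sent_queue() in the xmit path must be
 * matched by netdev_completed_queue()/netdev_tx_completed_queue() in the
 * TX completion path with the same byte counts:
 *
 *	in mydrv_start_xmit():
 *		netdev_tx_sent_queue(txq, skb->len);
 *
 *	in mydrv_tx_clean():
 *		netdev_tx_completed_queue(txq, pkts_done, bytes_done);
 */
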
static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
#endif
}

/**
 * 	netdev_reset_queue - reset the packets and bytes count of a network device
 * 	@dev_queue: network device
 *
 * 	Reset the bytes and packet count of a network device and clear the
 * 	software flow control OFF bit for this network device
 */
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
}

/**
 * 	netdev_cap_txqueue - check if selected tx queue exceeds device queues
 * 	@dev: network device
 * 	@queue_index: given tx queue index
 *
 * 	Returns 0 if given tx queue index >= number of device tx queues,
 * 	otherwise returns the originally passed tx queue index.
 */
static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
				     dev->name, queue_index,
				     dev->real_num_tx_queues);
		return 0;
	}

	return queue_index;
}

/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline bool netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_start_queue(txq);
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
	netif_tx_stop_queue(txq);
}

/**
 *	netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
					    u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}

static inline bool netif_subqueue_stopped(const struct net_device *dev,
					  struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

void netif_wake_subqueue(struct net_device *dev, u16 queue_index);
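
/*
 * Flow-control sketch for a multiqueue driver (illustrative only; the ring
 * structure and "mydrv" helpers are invented for the example):
 *
 *	in the xmit path, when a ring fills up:
 *		netif_stop_subqueue(dev, ring->index);
 *
 *	in the TX completion handler, once space is available again:
 *		if (__netif_subqueue_stopped(dev, ring->index))
 *			netif_wake_subqueue(dev, ring->index);
 */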

#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index);
#else
static inline int netif_set_xps_queue(struct net_device *dev,
				      const struct cpumask *mask,
				      u16 index)
{
	return 0;
}
#endif

u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues);

/*
 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
 * as a distribution range limit for the returned value.
 */
static inline u16 skb_tx_hash(const struct net_device *dev,
			      struct sk_buff *skb)
{
	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
	return dev->num_tx_queues > 1;
}

int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);

#ifdef CONFIG_SYSFS
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
						unsigned int rxq)
{
	return 0;
}
#endif

#ifdef CONFIG_SYSFS
static inline unsigned int get_netdev_rx_queue_index(
		struct netdev_rx_queue *queue)
{
	struct net_device *dev = queue->dev;
	int index = queue - dev->_rx;

	BUG_ON(index >= dev->num_rx_queues);
	return index;
}
#endif

#define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
int netif_get_num_default_rss_queues(void);

enum skb_free_reason {
	SKB_REASON_CONSUMED,
	SKB_REASON_DROPPED,
};

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
L
2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996
/*
 * It is not allowed to call kfree_skb() or consume_skb() from hardware
 * interrupt context or with hardware interrupts being disabled.
 * (in_irq() || irqs_disabled())
 *
 * We provide four helpers that can be used in following contexts :
 *
 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
 *  Typically used in place of consume_skb(skb) in TX completion path
 *
 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
 *  and consumed a packet. Used in place of consume_skb(skb)
L
2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
}

static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
}

int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
int netif_receive_skb(struct sk_buff *skb);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
struct packet_offload *gro_find_receive_by_type(__be16 type);
struct packet_offload *gro_find_complete_by_type(__be16 type);

static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}

int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data);
void netdev_rx_handler_unregister(struct net_device *dev);

bool dev_valid_name(const char *name);
int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
int dev_ethtool(struct net *net, struct ifreq *);
unsigned int dev_get_flags(const struct net_device *);
int __dev_change_flags(struct net_device *, unsigned int flags);
int dev_change_flags(struct net_device *, unsigned int);
void __dev_notify_flags(struct net_device *, unsigned int old_flags,
			unsigned int gchanges);
int dev_change_name(struct net_device *, const char *);
int dev_set_alias(struct net_device *, const char *, size_t);
int dev_change_net_namespace(struct net_device *, struct net *, const char *);
int dev_set_mtu(struct net_device *, int);
void dev_set_group(struct net_device *, int);
int dev_set_mac_address(struct net_device *, struct sockaddr *);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
				    struct netdev_queue *txq, int *ret);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
void netdev_run_todo(void);

/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	this_cpu_dec(*dev->pcpu_refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	this_cpu_inc(*dev->pcpu_refcnt);
}
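
/* Example (illustrative only): code that wants to keep a struct net_device
 * pointer beyond an RCU read-side section takes a reference with dev_hold()
 * and releases it later with dev_put().
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_index_rcu(net, ifindex);
 *	if (dev)
 *		dev_hold(dev);		// pin the device past the RCU section
 *	rcu_read_unlock();
 *	...
 *	if (dev)
 *		dev_put(dev);		// allow the device to be freed again
 */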

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name 'carrier' is inappropriate; these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer, not just hardware media.
 */

void linkwatch_init_dev(struct net_device *dev);
void linkwatch_fire_event(struct net_device *dev);
void linkwatch_forget_dev(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline bool netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

unsigned long dev_trans_start(struct net_device *dev);

void __netdev_watchdog_up(struct net_device *dev);

void netif_carrier_on(struct net_device *dev);

void netif_carrier_off(struct net_device *dev);
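
/* Example (hedged sketch): a driver's link-state interrupt or service task
 * usually mirrors PHY link changes with netif_carrier_on()/_off().  The
 * mydrv_phy_link_up() helper is hypothetical.
 *
 *	static void mydrv_check_link(struct net_device *dev)
 *	{
 *		if (mydrv_phy_link_up(dev)) {
 *			if (!netif_carrier_ok(dev))
 *				netif_carrier_on(dev);
 *		} else if (netif_carrier_ok(dev)) {
 *			netif_carrier_off(dev);
 *		}
 *	}
 */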

/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 *
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 * Check if the device is dormant.
 */
static inline bool netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}


/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if the device's RFC2863 operational state is up.
 */
static inline bool netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline bool netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

void netif_device_detach(struct net_device *dev);

void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
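
/* Example (illustrative only): drivers commonly expose a 'debug' module
 * parameter and convert it into a msg_enable bitmap at probe time.  The
 * 'priv' structure and default mask below are assumptions.
 *
 *	static int debug = -1;			// -1 selects the defaults below
 *	module_param(debug, int, 0);
 *
 *	// in the probe routine:
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_PROBE |
 *					  NETIF_MSG_LINK);
 *
 *	// later, gated logging:
 *	if (netif_msg_link(priv))
 *		netdev_info(dev, "link is up\n");
 */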

static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
	bool ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}

/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 * Get network device transmit lock
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}

#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_TRYLOCK(dev, txq)			\
	(((dev->features & NETIF_F_LLTX) == 0) ?	\
		__netif_tx_trylock(txq) :		\
		true )

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}

static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_nested(struct net_device *dev)
{
	int subclass = SINGLE_DEPTH_NESTING;

	if (dev->netdev_ops->ndo_get_lock_subclass)
		subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);

	spin_lock_nested(&dev->addr_list_lock, subclass);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}

/*
 * dev_addrs walker. Should be used only for read access. Call with
 * rcu_read_lock held.
 */
#define for_each_dev_addr(dev, ha) \
		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
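
/* Example (sketch only): walking the device address list under the RCU
 * read lock.
 *
 *	struct netdev_hw_addr *ha;
 *
 *	rcu_read_lock();
 *	for_each_dev_addr(dev, ha)
 *		pr_debug("%s: addr %pM type %d\n", dev->name, ha->addr, ha->type);
 *	rcu_read_unlock();
 */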

/* These functions live elsewhere (drivers/net/net_init.c, but related) */

void ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
				    unsigned char name_assign_type,
				    void (*setup)(struct net_device *),
				    unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
			 count)

int register_netdev(struct net_device *dev);
void unregister_netdev(struct net_device *dev);
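
/* Example (illustrative sketch): allocating, registering and tearing down a
 * simple Ethernet-style device.  The private structure and ops table are
 * hypothetical.
 *
 *	struct net_device *dev;
 *	int err;
 *
 *	dev = alloc_netdev(sizeof(struct mydrv_priv), "mydev%d",
 *			   NET_NAME_UNKNOWN, ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &mydrv_netdev_ops;
 *
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */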

/* General hardware address lists handling functions */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
		   struct netdev_hw_addr_list *from_list, int addr_len);
void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
		      struct netdev_hw_addr_list *from_list, int addr_len);
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
		       struct net_device *dev,
		       int (*sync)(struct net_device *, const unsigned char *),
		       int (*unsync)(struct net_device *,
				     const unsigned char *));
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
			  struct net_device *dev,
			  int (*unsync)(struct net_device *,
					const unsigned char *));
void __hw_addr_init(struct netdev_hw_addr_list *list);

/* Functions used for device addresses handling */
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type);
void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);

/* Functions used for unicast addresses handling */
int dev_uc_add(struct net_device *dev, const unsigned char *addr);
int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_uc_del(struct net_device *dev, const unsigned char *addr);
int dev_uc_sync(struct net_device *to, struct net_device *from);
int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_uc_unsync(struct net_device *to, struct net_device *from);
void dev_uc_flush(struct net_device *dev);
void dev_uc_init(struct net_device *dev);

/**
 *  __dev_uc_sync - Synchronize device's unicast list
 *  @dev:  device to sync
 *  @sync: function to call if address should be added
 *  @unsync: function to call if address should be removed
 *
 *  Add newly added addresses to the interface, and release
 *  addresses that have been deleted.
 **/
static inline int __dev_uc_sync(struct net_device *dev,
				int (*sync)(struct net_device *,
					    const unsigned char *),
				int (*unsync)(struct net_device *,
					      const unsigned char *))
{
	return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
}

/**
 *  __dev_uc_unsync - Remove synchronized addresses from device
 *  @dev:  device to sync
 *  @unsync: function to call if address should be removed
 *
 *  Remove all addresses that were added to the device by dev_uc_sync().
 **/
static inline void __dev_uc_unsync(struct net_device *dev,
				   int (*unsync)(struct net_device *,
						 const unsigned char *))
{
	__hw_addr_unsync_dev(&dev->uc, dev, unsync);
}

/* Functions used for multicast addresses handling */
int dev_mc_add(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_mc_del(struct net_device *dev, const unsigned char *addr);
int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_sync(struct net_device *to, struct net_device *from);
int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_mc_unsync(struct net_device *to, struct net_device *from);
void dev_mc_flush(struct net_device *dev);
void dev_mc_init(struct net_device *dev);

/**
 *  __dev_mc_sync - Synchronize device's multicast list
 *  @dev:  device to sync
 *  @sync: function to call if address should be added
 *  @unsync: function to call if address should be removed
 *
 *  Add newly added addresses to the interface, and release
 *  addresses that have been deleted.
 **/
static inline int __dev_mc_sync(struct net_device *dev,
				int (*sync)(struct net_device *,
					    const unsigned char *),
				int (*unsync)(struct net_device *,
					      const unsigned char *))
{
	return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
}

/**
 *  __dev_mc_unsync - Remove synchronized addresses from device
 *  @dev:  device to sync
 *  @unsync: function to call if address should be removed
 *
 *  Remove all addresses that were added to the device by dev_mc_sync().
 **/
static inline void __dev_mc_unsync(struct net_device *dev,
				   int (*unsync)(struct net_device *,
						 const unsigned char *))
{
	__hw_addr_unsync_dev(&dev->mc, dev, unsync);
}
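
/* Example (hedged sketch): a driver's ndo_set_rx_mode() callback can use
 * __dev_uc_sync()/__dev_mc_sync() to push unicast and multicast filter
 * changes to hardware.  The mydrv_hw_*_filter() helpers are hypothetical.
 *
 *	static int mydrv_add_addr(struct net_device *dev, const unsigned char *addr)
 *	{
 *		return mydrv_hw_add_filter(netdev_priv(dev), addr);
 *	}
 *
 *	static int mydrv_del_addr(struct net_device *dev, const unsigned char *addr)
 *	{
 *		return mydrv_hw_del_filter(netdev_priv(dev), addr);
 *	}
 *
 *	static void mydrv_set_rx_mode(struct net_device *dev)
 *	{
 *		__dev_uc_sync(dev, mydrv_add_addr, mydrv_del_addr);
 *		__dev_mc_sync(dev, mydrv_add_addr, mydrv_del_addr);
 *	}
 */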

/* Functions used for secondary unicast and multicast support */
void dev_set_rx_mode(struct net_device *dev);
void __dev_set_rx_mode(struct net_device *dev);
int dev_set_promiscuity(struct net_device *dev, int inc);
int dev_set_allmulti(struct net_device *dev, int inc);
void netdev_state_change(struct net_device *dev);
void netdev_notify_peers(struct net_device *dev);
void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
void dev_load(struct net *net, const char *name);
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage);
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats);

extern int		netdev_max_backlog;
extern int		netdev_tstamp_prequeue;
extern int		weight_p;
extern int		bpf_jit_enable;

bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						     struct list_head **iter);
struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
						     struct list_head **iter);

/* iterate through upper list, must be called under RCU read lock */
#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
	for (iter = &(dev)->adj_list.upper, \
	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
	     updev; \
	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))

/* iterate through upper list, must be called under RCU read lock */
#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
	for (iter = &(dev)->all_adj_list.upper, \
	     updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
	     updev; \
	     updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
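
/* Example (sketch only): walking a device's upper devices under the RCU
 * read lock, e.g. to find a bridge or bond stacked on top.
 *
 *	struct net_device *upper;
 *	struct list_head *iter;
 *
 *	rcu_read_lock();
 *	netdev_for_each_upper_dev_rcu(dev, upper, iter)
 *		pr_debug("%s is stacked under %s\n", dev->name, upper->name);
 *	rcu_read_unlock();
 */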

void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter);
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter);

#define netdev_for_each_lower_private(dev, priv, iter) \
	for (iter = (dev)->adj_list.lower.next, \
	     priv = netdev_lower_get_next_private(dev, &(iter)); \
	     priv; \
	     priv = netdev_lower_get_next_private(dev, &(iter)))

#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
	for (iter = &(dev)->adj_list.lower, \
	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
	     priv; \
	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)))

void *netdev_lower_get_next(struct net_device *dev,
				struct list_head **iter);
#define netdev_for_each_lower_dev(dev, ldev, iter) \
	for (iter = &(dev)->adj_list.lower, \
	     ldev = netdev_lower_get_next(dev, &(iter)); \
	     ldev; \
	     ldev = netdev_lower_get_next(dev, &(iter)))

void *netdev_adjacent_get_private(struct list_head *adj_list);
void *netdev_lower_get_first_private_rcu(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev);
int netdev_master_upper_dev_link_private(struct net_device *dev,
					 struct net_device *upper_dev,
					 void *private);
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev);
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev);

/* RSS keys are 40 or 52 bytes long */
#define NETDEV_RSS_KEY_LEN 52
extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];
void netdev_rss_key_fill(void *buffer, size_t len);

int dev_get_nest_level(struct net_device *dev,
		       bool (*type_check)(struct net_device *dev));
int skb_checksum_help(struct sk_buff *skb);
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path);
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features);

struct netdev_bonding_info {
	ifslave	slave;
	ifbond	master;
};

struct netdev_notifier_bonding_info {
	struct netdev_notifier_info info; /* must be first */
	struct netdev_bonding_info  bonding_info;
};

void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info);

static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	return __skb_gso_segment(skb, features, true);
}
__be16 skb_network_protocol(struct sk_buff *skb, int *depth);

static inline bool can_checksum_protocol(netdev_features_t features,
					 __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_V4_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_V6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
void net_enable_timestamp(void);
void net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif

static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
					      struct sk_buff *skb, struct net_device *dev,
					      bool more)
{
	skb->xmit_more = more ? 1 : 0;
	return ops->ndo_start_xmit(skb, dev);
}

static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
					    struct netdev_queue *txq, bool more)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc;

	rc = __netdev_start_xmit(ops, skb, dev, more);
	if (rc == NETDEV_TX_OK)
		txq_trans_update(txq);

	return rc;
}

int netdev_class_create_file_ns(struct class_attribute *class_attr,
				const void *ns);
void netdev_class_remove_file_ns(struct class_attribute *class_attr,
				 const void *ns);

static inline int netdev_class_create_file(struct class_attribute *class_attr)
{
	return netdev_class_create_file_ns(class_attr, NULL);
}

static inline void netdev_class_remove_file(struct class_attribute *class_attr)
{
	netdev_class_remove_file_ns(class_attr, NULL);
}

extern struct kobj_ns_type_operations net_ns_type_operations;

const char *netdev_drivername(const struct net_device *dev);

void linkwatch_run_queue(void);

static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
							  netdev_features_t f2)
{
	if (f1 & NETIF_F_GEN_CSUM)
		f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
	if (f2 & NETIF_F_GEN_CSUM)
		f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
	f1 &= f2;
	if (f1 & NETIF_F_GEN_CSUM)
		f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return f1;
}

static inline netdev_features_t netdev_get_wanted_features(
	struct net_device *dev)
{
	return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask);

/* Allow TSO being used on stacked device :
 * Performing the GSO segmentation before last device
 * is a performance improvement.
 */
static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
							netdev_features_t mask)
{
	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
}

int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);

void netif_stacked_transfer_operstate(const struct net_device *rootdev,
					struct net_device *dev);

netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features);
netdev_features_t netif_skb_features(struct sk_buff *skb);

static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;

	/* check flags correspondence */
	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE     != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_IPIP    != (NETIF_F_GSO_IPIP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_SIT     != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));

	return (features & feature) == feature;
}

static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}

static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
					int pulled_hlen, u16 mac_offset,
					int mac_len)
{
	skb->protocol = protocol;
	skb->encapsulation = 1;
	skb_push(skb, pulled_hlen);
	skb_reset_transport_header(skb);
	skb->mac_header = mac_offset;
	skb->network_header = skb->mac_header + mac_len;
	skb->mac_len = mac_len;
}

static inline bool netif_is_macvlan(struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN;
}

static inline bool netif_is_macvlan_port(struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN_PORT;
}

static inline bool netif_is_ipvlan(struct net_device *dev)
{
	return dev->priv_flags & IFF_IPVLAN_SLAVE;
}

static inline bool netif_is_ipvlan_port(struct net_device *dev)
{
	return dev->priv_flags & IFF_IPVLAN_MASTER;
}

static inline bool netif_is_bond_master(struct net_device *dev)
{
	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_is_bond_slave(struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

static inline bool netif_is_l3_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_MASTER;
}

static inline bool netif_is_l3_slave(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_SLAVE;
}

static inline bool netif_is_bridge_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_EBRIDGE;
}

static inline bool netif_is_bridge_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_BRIDGE_PORT;
}

static inline bool netif_is_ovs_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_OPENVSWITCH;
}

/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
}

extern struct pernet_operations __net_initdata loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (!dev->name[0] || strchr(dev->name, '%'))
		return "(unnamed net_device)";
	return dev->name;
}

static inline const char *netdev_reg_state(const struct net_device *dev)
{
	switch (dev->reg_state) {
	case NETREG_UNINITIALIZED: return " (uninitialized)";
	case NETREG_REGISTERED: return "";
	case NETREG_UNREGISTERING: return " (unregistering)";
	case NETREG_UNREGISTERED: return " (unregistered)";
	case NETREG_RELEASED: return " (released)";
	case NETREG_DUMMY: return " (dummy)";
	}

	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
	return " (unknown)";
}

__printf(3, 4)
void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...);
__printf(2, 3)
void netdev_emerg(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_alert(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_crit(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_err(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_warn(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_notice(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_info(const struct net_device *dev, const char *format, ...);

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
})
#endif

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif

/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s%s\n" format, netdev_name(dev),	\
	     netdev_reg_state(dev), ##args)

/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {					  			\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

3983 3984 3985 3986 3987 3988
#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif
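
/* Example (illustrative only): message-level gated logging from a driver,
 * assuming 'priv->msg_enable' was initialized with netif_msg_init() and
 * 'priv' is the (hypothetical) netdev_priv() structure.
 *
 *	netif_err(priv, tx_err, dev, "TX ring %d full, stopping queue\n",
 *		  ring->index);
 *
 *	netif_dbg(priv, intr, dev, "irq status %#x\n", status);
 */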

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *             sure which should go first, but I bet it won't make much
 *             difference if we are running VLANs.  The good news is that
 *             this protocol won't be in the list unless compiled in, so
 *             the average user (w/out VLANs) will not be adversely affected.
 *             --BLG
 *
 *		0800	IP
 *		8100    802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

#endif	/* _LINUX_NETDEVICE_H */