/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *              Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>

struct netpoll_info;
struct device;
struct phy_device;
/* 802.11 specific */
struct wireless_dev;
					/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
	( (netdev)->ethtool_ops = (ops) )

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);

/* hardware address assignment types */
#define NET_ADDR_PERM		0	/* address is permanent (default) */
#define NET_ADDR_RANDOM		1	/* address is generated randomly */
#define NET_ADDR_STOLEN		2	/* address is stolen from other device */
#define NET_ADDR_SET		3	/* address is set using
					 * dev_set_mac_address() */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously, in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy*/
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
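
/*
 * Illustrative sketch, not part of the original header: how a caller of
 * ->ndo_start_xmit() might use dev_xmit_complete() to decide whether it
 * still owns the skb.  The helper name is hypothetical.
 */
#if 0
static int example_xmit_one(struct sk_buff *skb, struct net_device *dev)
{
	int rc = dev->netdev_ops->ndo_start_xmit(skb, dev);

	if (dev_xmit_complete(rc))
		return rc;	/* driver consumed the skb (OK or error) */

	/* NETDEV_TX_BUSY/NETDEV_TX_LOCKED: the skb is still ours to requeue */
	return rc;
}
#endif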

/*
 *	Compute the worst case header length according to the protocols
 *	used.
 */

#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 *	Old network device statistics. Fields are native words
 *	(unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};
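
/*
 * Illustrative sketch, not part of the original header: a driver updating
 * the embedded dev->stats counters from its receive path (one of the
 * usage patterns described with ndo_get_stats64()/ndo_get_stats below).
 */
#if 0
static void example_rx_done(struct net_device *dev, unsigned int len, bool ok)
{
	if (ok) {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	} else {
		dev->stats.rx_errors++;
	}
}
#endif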


#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key rps_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
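
/*
 * Illustrative sketch, not part of the original header: walking the multicast
 * list from a driver's ndo_set_rx_mode() with the helpers above.  The
 * example_hw_*() calls and EXAMPLE_HW_MC_SLOTS limit are hypothetical.
 */
#if 0
static void example_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if ((dev->flags & IFF_PROMISC) ||
	    netdev_mc_count(dev) > EXAMPLE_HW_MC_SLOTS) {
		example_hw_accept_all_multicast(dev);
		return;
	}

	netdev_for_each_mc_addr(ha, dev)
		example_hw_add_mc_filter(dev, ha->addr);
}
#endif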

struct hh_cache {
	u16		hh_len;
	u16		__pad;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs.        */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
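
/*
 * Illustrative sketch, not part of the original header: reserving link-layer
 * headroom with LL_RESERVED_SPACE() before building a packet, as protocol
 * code typically does.  The helper name is hypothetical.
 */
#if 0
static struct sk_buff *example_alloc_packet(struct net_device *dev,
					    unsigned int payload_len)
{
	struct sk_buff *skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len,
					GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* room for hard header */
	skb->dev = dev;
	return skb;
}
#endif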

struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};


/*
 * This structure holds at boot time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	unsigned int		gro_count;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_HASHED,	/* In NAPI hash */
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler considers that the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
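
/*
 * Illustrative sketch, not part of the original header: a minimal rx_handler
 * in the style of team/bonding that diverts every frame to an upper device.
 * struct example_port is hypothetical; the handler would be attached with
 * netdev_rx_handler_register() under RTNL.
 */
#if 0
static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct example_port *port = rcu_dereference(skb->dev->rx_handler_data);

	skb->dev = port->upper_dev;	/* deliver through the upper device */
	*pskb = skb;
	return RX_HANDLER_ANOTHER;
}
#endif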

void __napi_schedule(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable
 * to ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline bool napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}
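
/*
 * Illustrative sketch, not part of the original header: the usual interrupt
 * handler pattern that defers work to NAPI.  struct example_adapter and
 * example_mask_irqs() are hypothetical.
 */
#if 0
static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct example_adapter *adapter = dev_id;

	example_mask_irqs(adapter);		/* quiesce the device */
	napi_schedule(&adapter->napi);		/* poll() will run from softirq */
	return IRQ_HANDLED;
}
#endif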

/* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
void __napi_complete(struct napi_struct *n);
void napi_complete(struct napi_struct *n);

/**
 *	napi_by_id - lookup a NAPI by napi_id
 *	@napi_id: hashed napi_id
 *
 * lookup @napi_id in napi_hash table
 * must be called under rcu_read_lock()
 */
struct napi_struct *napi_by_id(unsigned int napi_id);

/**
 *	napi_hash_add - add a NAPI to global hashtable
 *	@napi: napi context
 *
 * generate a new napi_id and store a @napi under it in napi_hash
 */
void napi_hash_add(struct napi_struct *napi);

/**
 *	napi_hash_del - remove a NAPI from global table
 *	@napi: napi context
 *
 * Warning: caller must observe rcu grace period
 * before freeing memory containing @napi
 */
void napi_hash_del(struct napi_struct *napi);

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
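
/*
 * Illustrative sketch, not part of the original header: pairing napi_enable()
 * and napi_disable() in a driver's ndo_open/ndo_stop.  The example_* names
 * are hypothetical; the napi_struct would have been registered earlier with
 * netif_napi_add().
 */
#if 0
static int example_open(struct net_device *dev)
{
	struct example_adapter *adapter = netdev_priv(dev);

	napi_enable(&adapter->napi);
	example_unmask_irqs(adapter);
	return 0;
}

static int example_stop(struct net_device *dev)
{
	struct example_adapter *adapter = netdev_priv(dev);

	example_mask_irqs(adapter);
	napi_disable(&adapter->napi);
	return 0;
}
#endif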

#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF)		| \
			      (1 << __QUEUE_STATE_STACK_XOFF))
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF		| \
					(1 << __QUEUE_STATE_FROZEN))
};
/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
 * netif_tx_* functions below are used to manipulate this flag.  The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently.  The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state).  Drivers should not need to call
 * netif_xmit*stopped functions, they should only be using netif_tx_*.
 */

struct netdev_queue {
/*
 * read mostly part
 */
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
/*
 * write mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;

	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
} ____cacheline_aligned_in_smp;

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 */
struct rps_sock_flow_table {
	unsigned int mask;
	u16 ents[0];
};
#define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
    ((_num) * sizeof(u16)))

#define RPS_NO_CPU 0xffff

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int cpu, index = hash & table->mask;

		/* We only give a hint, preemption can change cpu under us */
		cpu = raw_smp_processor_id();

		if (table->ents[index] != cpu)
			table->ents[index] = cpu;
	}
}

static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
				       u32 hash)
{
	if (table && hash)
		table->ents[hash & table->mask] = RPS_NO_CPU;
}

extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif
#endif /* CONFIG_RPS */

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
#ifdef CONFIG_RPS
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
#endif
	struct kobject			kobj;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;

/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, const char *buf, size_t len);
};

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))	\
    / sizeof(u16))

/*
 * This structure holds all XPS maps for device.  Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\
    (nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_PORT_ID_LEN 32

/* This structure holds a unique identifier to identify the
 * physical port used by a netdevice.
 */
struct netdev_phys_port_id {
	unsigned char id[MAX_PHYS_PORT_ID_LEN];
	unsigned char id_len;
};

typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb);

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *     This function is called once when a network device is registered.
 *     The network device can use this for any late stage initialization
 *     or semantic validation. It can fail with an error code which will
 *     be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *     This function is called when device is unregistered or when registration
 *     fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *     This function is called when the network device transitions to the up
 *     state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *     This function is called when the network device transitions to the down
 *     state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
 *        (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 *	Required; can not be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
 *                         void *accel_priv, select_queue_fallback_t fallback);
 *	Called to decide which queue to use when the device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow device receiver to make
 *	changes to configuration when multicast or promiscuous mode is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device changes its address list filtering.
 *	If driver handles unicast address filtering, it should set
 *	IFF_UNICAST_FLT to its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function  is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	mac address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called when a user requests an ioctl which can't be handled by
 *	the generic interface code. If not defined, ioctls return a
 *	not-supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set network devices bus interface parameters. This interface
 *	is retained for legacy reason, new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device. If not defined, any request to change the MTU
 *	will return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
 *                      struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it. The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If the device supports VLAN filtering, this function is called when a
 *	VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
 *	If the device supports VLAN filtering, this function is called when a
 *	VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *	SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
 * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *			    int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *			  struct nlattr *port[]);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
 * 	Called to setup 'tc' number of traffic classes in the net device. This
 * 	is always called from the stack with the rtnl lock held and netif tx
 * 	queues stopped. This allows the netdevice to perform queue management
 * 	safely.
 *
 *	Fiber Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to start using LLD for FCoE
 *	so the underlying device can perform whatever needed configuration or
 *	initialization to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
 *	so the underlying device can perform whatever needed clean-ups to
 *	stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *			     struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Initiator wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev,  u16 xid);
 *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *	indicated by the FC exchange id 'xid', so the underlying device can
 *	clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *			      struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Target wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *			       struct netdev_fcoe_hbainfo *hbainfo);
 *	Called when the FCoE Protocol stack wants information on the underlying
 *	device. This information is utilized by the FCoE protocol stack to
 *	register attributes with Fiber Channel management service as per the
 *	FC-GS Fabric Device Management Information(FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override default World Wide
 *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
 *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
 *	protocol stack to use.
 *
 *	RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *			    u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS.  rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 *	Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release previously enslaved netdev.
 *
 *      Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *		netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. Passed
 *	feature set might be less than what was returned by ndo_fix_features()).
 *	Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 flags)
 *	Adds an FDB entry to dev for addr.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr)
 *	Deletes the FDB entry from dev corresponding to addr.
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *		       struct net_device *dev, int idx)
 *	Used to add FDB entries to dump requests. Implementers should add
 *	entries to skb and update idx with the number of entries.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *			     struct net_device *dev, u32 filter_mask)
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *	Called to change device carrier. Soft-devices (like dummy, team, etc)
 *	which do not represent real hardware may define this to allow their
 *	userspace components to manage their virtual carrier state. Devices
 *	that determine carrier state from physical hardware properties (eg
 *	network cables) or protocol-dependent mechanisms (eg
 *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 *
 * int (*ndo_get_phys_port_id)(struct net_device *dev,
 *			       struct netdev_phys_port_id *ppid);
 *	Called to get ID of physical port of this device. If driver does
 *	not implement this, it is assumed that the hw is not able to have
 *	multiple net devices on single physical port.
 *
 * void (*ndo_add_vxlan_port)(struct  net_device *dev,
 *			      sa_family_t sa_family, __be16 port);
 *	Called by vxlan to notify a driver about the UDP port and socket
 *	address family that vxlan is listening to. It is called only when
 *	a new port starts listening. The operation is protected by the
 *	vxlan_net->sock_lock.
 *
 * void (*ndo_del_vxlan_port)(struct  net_device *dev,
 *			      sa_family_t sa_family, __be16 port);
 *	Called by vxlan to notify the driver about a UDP port and socket
 *	address family that vxlan is not listening to anymore. The operation
 *	is protected by the vxlan_net->sock_lock.
 *
 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
 *				 struct net_device *dev)
 *	Called by upper layer devices to accelerate switching or other
 *	station functionality into hardware. 'pdev' is the lowerdev
 *	to use for the offload and 'dev' is the net device that will
 *	back the offload. Returns a pointer to the private structure
 *	the upper layer will maintain.
 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
 *	Called by upper layer device to delete the station created
 *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
 *	the station and priv is the structure returned by the add
 *	operation.
 * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
 *				      struct net_device *dev,
 *				      void *priv);
 *	Callback to use for xmit over the accelerated station. This
 *	is used in place of ndo_start_xmit on accelerated net
 *	devices.
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
						   struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb,
						    void *accel_priv,
						    select_queue_fallback_t fallback);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
					        struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
					          struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev);

	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
						     struct rtnl_link_stats64 *storage);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
						        __be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void                    (*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info,
						     gfp_t gfp);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	int			(*ndo_busy_poll)(struct napi_struct *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan, u8 qos);
	int			(*ndo_set_vf_tx_rate)(struct net_device *dev,
						      int vf, int rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 flags);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						int idx);

	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_port_id *ppid);
	void			(*ndo_add_vxlan_port)(struct  net_device *dev,
						      sa_family_t sa_family,
						      __be16 port);
	void			(*ndo_del_vxlan_port)(struct  net_device *dev,
						      sa_family_t sa_family,
						      __be16 port);

	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
							struct net_device *dev);
	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
							void *priv);

	netdev_tx_t		(*ndo_dfwd_start_xmit) (struct sk_buff *skb,
							struct net_device *dev,
							void *priv);
};
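
/*
 * Illustrative sketch, not part of the original header: the small set of hooks
 * a simple Ethernet driver might fill in.  Only ndo_start_xmit is required;
 * the example_* functions are hypothetical, the eth_* helpers are the stock
 * ones from etherdevice.h.
 */
#if 0
static const struct net_device_ops example_netdev_ops = {
	.ndo_open		= example_open,
	.ndo_stop		= example_stop,
	.ndo_start_xmit		= example_start_xmit,
	.ndo_set_rx_mode	= example_set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
#endif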

/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device {

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];

	/* device name hash chain, please keep it close to name[] */
	struct hlist_node	name_hlist;

	/* snmp alias */
	char 			*ifalias;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	int			irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;
	struct list_head	close_list;

	/* directly linked devices, like slaves for bonding */
	struct {
		struct list_head upper;
		struct list_head lower;
	} adj_list;

	/* all linked devices, *including* neighbours */
	struct {
		struct list_head upper;
		struct list_head lower;
	} all_adj_list;


	/* currently active device features */
	netdev_features_t	features;
	/* user-changeable features */
	netdev_features_t	hw_features;
	/* user-requested features */
	netdev_features_t	wanted_features;
	/* mask of features inheritable by VLAN devices */
	netdev_features_t	vlan_features;
	/* mask of features inherited by encapsulating devices
	 * This field indicates what encapsulation offloads
	 * the hardware is capable of doing, and drivers will
	 * need to set them appropriately.
	 */
	netdev_features_t	hw_enc_features;
	/* mask of features inheritable by MPLS */
	netdev_features_t	mpls_features;

	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;

	struct net_device_stats	stats;
	atomic_long_t		rx_dropped; /* dropped packets by core network
					     * Do not use this in drivers.
					     */

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *	wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data *	wireless_data;
#endif
	/* Management operations */
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;
	const struct forwarding_accel_ops *fwd_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;

	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned int		priv_flags; /* Like 'flags' but invisible to userspace.
					     * See if.h for definitions. */
	unsigned short		gflags;
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	unsigned int		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	/* extra head- and tailroom the hardware may need, but not in all cases
	 * can this be guaranteed, especially tailroom. Some cases also use
	 * LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_assign_type; /* hw address assignment type */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short		neigh_priv_len;
	unsigned short          dev_id;		/* Used to differentiate devices
						 * that share the same link
						 * layer address
						 */
	spinlock_t		addr_list_lock;
	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
	struct netdev_hw_addr_list	dev_addrs; /* list of device
						    * hw addresses
						    */
#ifdef CONFIG_SYSFS
	struct kset		*queues_kset;
#endif

	bool			uc_promisc;
	unsigned int		promiscuity;
	unsigned int		allmulti;


	/* Protocol specific pointers */

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu	*vlan_info;	/* VLAN info */
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_switch_tree	*dsa_ptr;	/* dsa specific data */
#endif
#if IS_ENABLED(CONFIG_TIPC)
	struct tipc_bearer __rcu *tipc_ptr;	/* TIPC specific data */
#endif
	void 			*atalk_ptr;	/* AppleTalk link 	*/
	struct in_device __rcu	*ip_ptr;	/* IPv4 specific data	*/
	struct dn_dev __rcu     *dn_ptr;        /* DECnet specific data */
	struct inet6_dev __rcu	*ip6_ptr;       /* IPv6 specific data */
	void			*ax25_ptr;	/* AX.25 specific data */
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */

/*
 * Cache lines mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx
						 * This should not be set in
						 * drivers, unless really needed,
						 * because network stack (bonding)
						 * uses it if/when necessary, to
						 * avoid dirtying this cache line.
						 */

	/* Interface address info used in eth_type_trans() */
	unsigned char		*dev_addr;	/* hw address, (before bcast
						   because most packets are
						   unicast) */


#ifdef CONFIG_SYSFS
	struct netdev_rx_queue	*_rx;

	/* Number of RX queues allocated at register_netdev() time */
	unsigned int		num_rx_queues;

	/* Number of RX queues currently active in device */
	unsigned int		real_num_rx_queues;

#endif

	rx_handler_func_t __rcu	*rx_handler;
	void __rcu		*rx_handler_data;

	struct netdev_queue __rcu *ingress_queue;
	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/


/*
 * Cache lines mostly used on transmit path
 */
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;

	/* Number of TX queues allocated at alloc_netdev_mq() time  */
	unsigned int		num_tx_queues;

	/* Number of TX queues currently active in device  */
	unsigned int		real_num_tx_queues;

	/* root qdisc from userspace point of view */
	struct Qdisc		*qdisc;

	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
	spinlock_t		tx_global_lock;

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps;
#endif
#ifdef CONFIG_RFS_ACCEL
	/* CPU reverse-mapping for RX completion interrupts, indexed
	 * by RX queue number.  Assigned by driver.  This must only be
	 * set if the ndo_rx_flow_steer operation is defined. */
	struct cpu_rmap		*rx_cpu_rmap;
#endif

	/* These may be needed for future network-power-down code. */

	/*
	 * trans_start here is expensive for high speed devices on SMP,
	 * please use netdev_queue->trans_start instead.
	 */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

	/* Number of references to this device */
	int __percpu		*pcpu_refcnt;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct list_head	link_watch_list;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:8;

	bool dismantle; /* device is going do be freed */

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;

	/* Called from unregister, can be used to call free_netdev */
	void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info __rcu	*npinfo;
#endif

#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif

	/* mid-layer private */
	union {
		void				*ml_priv;
		struct pcpu_lstats __percpu	*lstats; /* loopback stats */
		struct pcpu_sw_netstats __percpu	*tstats;
		struct pcpu_dstats __percpu	*dstats; /* dummy stats */
		struct pcpu_vstats __percpu	*vstats; /* veth stats */
	};
	/* GARP */
	struct garp_port __rcu	*garp_port;
	/* MRP */
	struct mrp_port __rcu	*mrp_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional device, statistics, and wireless sysfs groups */
	const struct attribute_group *sysfs_groups[4];
	/* space for optional per-rx queue attributes */
	const struct attribute_group *sysfs_rx_queue_group;

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;
1453 1454
#define GSO_MAX_SEGS		65535
	u16			gso_max_segs;
1455

J
Jeff Kirsher 已提交
1456
#ifdef CONFIG_DCB
1457
	/* Data Center Bridging netlink ops */
1458
	const struct dcbnl_rtnl_ops *dcbnl_ops;
1459
#endif
1460 1461 1462
	u8 num_tc;
	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
	u8 prio_tc_map[TC_BITMASK + 1];
1463

1464
#if IS_ENABLED(CONFIG_FCOE)
1465 1466
	/* max exchange id for FCoE LRO by ddp */
	unsigned int		fcoe_ddp_xid;
1467
#endif
1468
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
1469
	struct netprio_map __rcu *priomap;
1470
#endif
1471 1472
	/* phy device may attach itself for hardware timestamping */
	struct phy_device *phydev;
1473

1474 1475
	struct lock_class_key *qdisc_tx_busylock;

1476 1477
	/* group the device belongs to */
	int group;
1478 1479

	struct pm_qos_request	pm_qos_req;
L
Linus Torvalds 已提交
1480
};
1481
#define to_net_dev(d) container_of(d, struct net_device, dev)
L
Linus Torvalds 已提交
1482 1483 1484

#define	NETDEV_ALIGN		32

static inline
int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
{
	return dev->prio_tc_map[prio & TC_BITMASK];
}

static inline
int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
	return 0;
}

static inline
void netdev_reset_tc(struct net_device *dev)
{
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}

static inline
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}

static inline
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	dev->num_tc = num_tc;
	return 0;
}

static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}
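
/*
 * Illustrative sketch only (not part of this header's API surface): a
 * multiqueue driver with eight TX queues could map them onto two traffic
 * classes using the helpers above, typically from its ndo_setup_tc()
 * callback.  "my_dev" is a hypothetical net_device pointer.
 *
 *	netdev_reset_tc(my_dev);
 *	if (netdev_set_num_tc(my_dev, 2))
 *		return -EINVAL;
 *	netdev_set_tc_queue(my_dev, 0, 4, 0);	// TC0 -> queues 0..3
 *	netdev_set_tc_queue(my_dev, 1, 4, 4);	// TC1 -> queues 4..7
 *	netdev_set_prio_tc_map(my_dev, 0, 0);	// priority 0 -> TC0
 *	netdev_set_prio_tc_map(my_dev, 7, 1);	// priority 7 -> TC1
 */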

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}
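
/*
 * Illustrative sketch only: netdev_for_each_tx_queue() runs a callback on
 * every TX queue of a device, e.g. to reset per-queue BQL state.
 * "my_reset_queue" is a hypothetical helper, not a kernel symbol.
 *
 *	static void my_reset_queue(struct net_device *dev,
 *				   struct netdev_queue *txq, void *unused)
 *	{
 *		netdev_tx_reset_queue(txq);
 *	}
 *
 *	netdev_for_each_tx_queue(dev, my_reset_queue, NULL);
 */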

struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv);

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}

static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
	if (dev->dsa_ptr != NULL)
		return dsa_uses_dsa_tags(dev->dsa_ptr);
#endif

	return 0;
}

static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	if (dev->dsa_ptr != NULL)
		return dsa_uses_trailer_tags(dev->dsa_ptr);
#endif

	return 0;
}

/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}

/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))

/* Default NAPI poll() weight
 * Device drivers are strongly advised not to use a bigger value.
 */
#define NAPI_POLL_WEIGHT 64

/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);
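
/*
 * Illustrative sketch only: a typical driver pairs netif_napi_add() in its
 * setup path with napi_enable()/napi_disable() at open/stop time, and calls
 * napi_complete() from its poll routine when the budget was not exhausted.
 * "my_poll" and "priv" are hypothetical driver symbols.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = 0;
 *
 *		// ... process up to "budget" received packets ...
 *		if (work_done < budget)
 *			napi_complete(napi);
 *		return work_done;
 *	}
 *
 *	netif_napi_add(dev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);
 */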
/**
 *  netif_napi_del - remove a napi context
 *  @napi: napi context
 *
 *  netif_napi_del() removes a napi context from the network device napi list
 */
void netif_napi_del(struct napi_struct *napi);

struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void *frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int data_offset;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	u16	flush;

	/* Save the IP ID here and check when we get to the transport layer */
	u16	flush_id;

	/* Number of segments aggregated. */
	u16	count;

	/* This is non-zero if the packet may be of the same flow. */
	u8	same_flow;

	/* Free the skb? */
	u8	free;
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2

	/* jiffies when first packet was created/queued */
	unsigned long age;

	/* Used in ipv6_gro_receive() */
	u16	proto;

	/* Used in udp_gro_receive */
	u16	udp_mark;

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;

	/* used in skb_gro_receive() slow path */
	struct sk_buff *last;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	bool			(*id_match)(struct packet_type *ptype,
					    struct sock *sk);
	void			*af_packet_priv;
	struct list_head	list;
};

struct offload_callbacks {
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	int			(*gso_send_check)(struct sk_buff *skb);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
					       struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
};

struct packet_offload {
	__be16			 type;	/* This is really htons(ether_type). */
	struct offload_callbacks callbacks;
	struct list_head	 list;
};

struct udp_offload {
	__be16			 port;
	struct offload_callbacks callbacks;
};

/* often modified stats are per cpu, other are shared (netdev->stats) */
struct pcpu_sw_netstats {
	u64     rx_packets;
	u64     rx_bytes;
	u64     tx_packets;
	u64     tx_bytes;
	struct u64_stats_sync   syncp;
};

#include <linux/notifier.h>

/* netdevice notifier chain. Please remember to update the rtnetlink
 * notification exclusion list in rtnetlink_event() when adding new
 * types.
 */
#define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
#define NETDEV_DOWN	0x0002
#define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
				   detected a hardware crash and restarted
				   - we can use this eg to kick tcp sessions
				   once done */
#define NETDEV_CHANGE	0x0004	/* Notify device state change */
#define NETDEV_REGISTER 0x0005
#define NETDEV_UNREGISTER	0x0006
#define NETDEV_CHANGEMTU	0x0007 /* notify after mtu change happened */
#define NETDEV_CHANGEADDR	0x0008
#define NETDEV_GOING_DOWN	0x0009
#define NETDEV_CHANGENAME	0x000A
#define NETDEV_FEAT_CHANGE	0x000B
#define NETDEV_BONDING_FAILOVER 0x000C
#define NETDEV_PRE_UP		0x000D
#define NETDEV_PRE_TYPE_CHANGE	0x000E
#define NETDEV_POST_TYPE_CHANGE	0x000F
#define NETDEV_POST_INIT	0x0010
#define NETDEV_UNREGISTER_FINAL 0x0011
#define NETDEV_RELEASE		0x0012
#define NETDEV_NOTIFY_PEERS	0x0013
#define NETDEV_JOIN		0x0014
#define NETDEV_CHANGEUPPER	0x0015
#define NETDEV_RESEND_IGMP	0x0016
#define NETDEV_PRECHANGEMTU	0x0017 /* notify before mtu change happened */

int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);

struct netdev_notifier_info {
	struct net_device *dev;
};

struct netdev_notifier_change_info {
	struct netdev_notifier_info info; /* must be first */
	unsigned int flags_changed;
};

static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
					     struct net_device *dev)
{
	info->dev = dev;
}

static inline struct net_device *
netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
{
	return info->dev;
}
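
/*
 * Illustrative sketch only: a subsystem can watch for device events by
 * registering a notifier_block; the net_device involved is recovered with
 * netdev_notifier_info_to_dev().  "my_netdev_event" and "my_nb" are
 * hypothetical names.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_netdev_event };
 *	register_netdevice_notifier(&my_nb);
 */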

int call_netdevice_notifiers(unsigned long val, struct net_device *dev);


extern rwlock_t				dev_base_lock;		/* Device list lock */

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave)	\
		for_each_netdev_rcu(&init_net, slave)	\
			if (netdev_master_upper_dev_get_rcu(slave) == bond)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

static inline struct net_device *first_net_device_rcu(struct net *net)
{
	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));

	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

int netdev_boot_setup_check(struct net_device *dev);
unsigned long netdev_boot_base(const char *prefix, int unit);
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *hwaddr);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
void dev_add_pack(struct packet_type *pt);
void dev_remove_pack(struct packet_type *pt);
void __dev_remove_pack(struct packet_type *pt);
void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
					unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
int dev_open(struct net_device *dev);
int dev_close(struct net_device *dev);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct sk_buff *newskb);
int dev_queue_xmit(struct sk_buff *skb);
int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}

int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
void netdev_freemem(struct net_device *dev);
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);

struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
int netpoll_trap(void);
#endif
int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
	return skb->data + offset;
}

static inline void *skb_gro_mac_header(struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
}

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
						  csum_partial(start, len, 0));
}

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned int len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}
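
/*
 * Illustrative sketch only: protocols prepend a link-layer header through
 * dev_hard_header(); for Ethernet this ends up in the device's header_ops
 * (eth_header()).  The return value is the header length, so a negative
 * value can be treated as an error.  "dest" is a hypothetical destination
 * hardware address.
 *
 *	if (dev_hard_header(skb, dev, ETH_P_IP, dest, dev->dev_addr,
 *			    skb->len) < 0)
 *		goto drop;
 */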

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}

static inline int dev_rebuild_header(struct sk_buff *skb)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->rebuild)
		return 0;
	return dev->header_ops->rebuild(skb);
}

typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}

#ifdef CONFIG_NET_FLOW_LIMIT
#define FLOW_LIMIT_HISTORY	(1 << 7)  /* must be ^2 and !overflow buckets */
struct sd_flow_limit {
	u64			count;
	unsigned int		num_buckets;
	unsigned int		history_head;
	u16			history[FLOW_LIMIT_HISTORY];
	u8			buckets[];
};

extern int netdev_flow_limit_table_len;
#endif /* CONFIG_NET_FLOW_LIMIT */

/*
 * Incoming packets are placed on per-cpu queues
 */
struct softnet_data {
	struct Qdisc		*output_queue;
	struct Qdisc		**output_queue_tailp;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;
	struct sk_buff_head	process_queue;

	/* stats */
	unsigned int		processed;
	unsigned int		time_squeeze;
	unsigned int		cpu_collision;
	unsigned int		received_rps;

#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;

	/* Elements below can be accessed between CPUs for RPS */
	struct call_single_data	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		cpu;
	unsigned int		input_queue_head;
	unsigned int		input_queue_tail;
#endif
	unsigned int		dropped;
	struct sk_buff_head	input_pkt_queue;
	struct napi_struct	backlog;

#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit __rcu *flow_limit;
#endif
};

static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	sd->input_queue_head++;
#endif
}

static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
#ifdef CONFIG_RPS
	*qtail = ++sd->input_queue_tail;
#endif
}

DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);

void __netif_schedule(struct Qdisc *q);

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (!(txq->state & QUEUE_STATE_ANY_XOFF))
		__netif_schedule(txq->qdisc);
}

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}

static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		netif_tx_start_queue(dev_queue);
		return;
	}
#endif
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue->qdisc);
}

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}

static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	if (WARN_ON(!dev_queue)) {
		pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
		return;
	}
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_stop_queue(txq);
	}
}

static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline bool netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}

static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}

static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					unsigned int bytes)
{
#ifdef CONFIG_BQL
	dql_queued(&dev_queue->dql, bytes);

	if (likely(dql_avail(&dev_queue->dql) >= 0))
		return;

	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

	/*
	 * The XOFF flag must be set before checking the dql_avail below,
	 * because in netdev_tx_completed_queue we update the dql_completed
	 * before checking the XOFF flag.
	 */
	smp_mb();

	/* check again in case another CPU has just made room avail */
	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}

/**
 * 	netdev_sent_queue - report the number of bytes queued to hardware
 * 	@dev: network device
 * 	@bytes: number of bytes queued to the hardware device queue
 *
 * 	Report the number of bytes queued for sending/completion to the network
 * 	device hardware queue. @bytes should be a good approximation and should
 * 	exactly match netdev_completed_queue() @bytes
 */
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}

static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/*
	 * Without the memory barrier there is a small possibility that
	 * netdev_tx_sent_queue will miss the update and cause the queue to
	 * be stopped forever
	 */
	smp_mb();

	if (dql_avail(&dev_queue->dql) < 0)
		return;

	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
#endif
}

/**
 * 	netdev_completed_queue - report bytes and packets completed by device
 * 	@dev: network device
 * 	@pkts: actual number of packets sent over the medium
 * 	@bytes: actual number of bytes sent over the medium
 *
 * 	Report the number of bytes and packets transmitted by the network device
 * 	hardware queue over the physical medium, @bytes must exactly match the
 * 	@bytes amount passed to netdev_sent_queue()
 */
static inline void netdev_completed_queue(struct net_device *dev,
					  unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}
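
/*
 * Illustrative sketch only: with BQL a driver reports queued bytes from its
 * xmit path and completed packets/bytes from its TX completion handler:
 *
 *	// in ndo_start_xmit(), after posting the descriptor:
 *	netdev_tx_sent_queue(txq, skb->len);
 *
 *	// in the TX completion handler, after reclaiming descriptors:
 *	netdev_tx_completed_queue(txq, pkts_done, bytes_done);
 *
 * Single-queue devices may use netdev_sent_queue()/netdev_completed_queue()
 * instead.  "txq", "pkts_done" and "bytes_done" are hypothetical variables.
 */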

static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
#endif
}

/**
 * 	netdev_reset_queue - reset the packets and bytes count of a network device
 * 	@dev_queue: network device
 *
 * 	Reset the bytes and packet count of a network device and clear the
 * 	software flow control OFF bit for this network device
 */
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
}

/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline bool netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_start_queue(txq);
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	netif_tx_stop_queue(txq);
}

/**
 *	netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
					    u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}

static inline bool netif_subqueue_stopped(const struct net_device *dev,
					  struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index);
#else
static inline int netif_set_xps_queue(struct net_device *dev,
				      const struct cpumask *mask,
				      u16 index)
{
	return 0;
}
#endif

/*
 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
 * as a distribution range limit for the returned value.
 */
static inline u16 skb_tx_hash(const struct net_device *dev,
			      const struct sk_buff *skb)
{
	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
	return dev->num_tx_queues > 1;
}

int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);

#ifdef CONFIG_SYSFS
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
						unsigned int rxq)
{
	return 0;
}
#endif

static inline int netif_copy_real_num_queues(struct net_device *to_dev,
					     const struct net_device *from_dev)
{
	int err;

	err = netif_set_real_num_tx_queues(to_dev,
					   from_dev->real_num_tx_queues);
	if (err)
		return err;
#ifdef CONFIG_SYSFS
	return netif_set_real_num_rx_queues(to_dev,
					    from_dev->real_num_rx_queues);
#else
	return 0;
#endif
}

#ifdef CONFIG_SYSFS
static inline unsigned int get_netdev_rx_queue_index(
		struct netdev_rx_queue *queue)
{
	struct net_device *dev = queue->dev;
	int index = queue - dev->_rx;

	BUG_ON(index >= dev->num_rx_queues);
	return index;
}
#endif

#define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
int netif_get_num_default_rss_queues(void);

enum skb_free_reason {
	SKB_REASON_CONSUMED,
	SKB_REASON_DROPPED,
};

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
/*
 * It is not allowed to call kfree_skb() or consume_skb() from hardware
 * interrupt context or with hardware interrupts being disabled.
 * (in_irq() || irqs_disabled())
 *
 * We provide four helpers that can be used in following contexts :
 *
 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
 *  Typically used in place of consume_skb(skb) in TX completion path
 *
 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
 *  and consumed a packet. Used in place of consume_skb(skb)
 */
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
}

static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
}
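
/*
 * Illustrative sketch only: a TX completion handler running in hard irq
 * context would use the _irq variants (dev_consume_skb_irq() for a
 * successfully sent skb, dev_kfree_skb_irq() for one it drops); code that
 * may run in any context uses the _any variants.  "tx_error" is a
 * hypothetical status flag.
 *
 *	if (unlikely(tx_error))
 *		dev_kfree_skb_any(skb);
 *	else
 *		dev_consume_skb_any(skb);
 */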
int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
int netif_receive_skb(struct sk_buff *skb);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
struct packet_offload *gro_find_receive_by_type(__be16 type);
struct packet_offload *gro_find_complete_by_type(__be16 type);

static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}

int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data);
void netdev_rx_handler_unregister(struct net_device *dev);

bool dev_valid_name(const char *name);
int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
int dev_ethtool(struct net *net, struct ifreq *);
unsigned int dev_get_flags(const struct net_device *);
int __dev_change_flags(struct net_device *, unsigned int flags);
int dev_change_flags(struct net_device *, unsigned int);
void __dev_notify_flags(struct net_device *, unsigned int old_flags,
			unsigned int gchanges);
int dev_change_name(struct net_device *, const char *);
int dev_set_alias(struct net_device *, const char *, size_t);
int dev_change_net_namespace(struct net_device *, struct net *, const char *);
int dev_set_mtu(struct net_device *, int);
void dev_set_group(struct net_device *, int);
int dev_set_mac_address(struct net_device *, struct sockaddr *);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_port_id *ppid);
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
void netdev_run_todo(void);

/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	this_cpu_dec(*dev->pcpu_refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	this_cpu_inc(*dev->pcpu_refcnt);
}

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer not just hardware media.
 */

void linkwatch_init_dev(struct net_device *dev);
void linkwatch_fire_event(struct net_device *dev);
void linkwatch_forget_dev(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline bool netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

unsigned long dev_trans_start(struct net_device *dev);

void __netdev_watchdog_up(struct net_device *dev);

void netif_carrier_on(struct net_device *dev);

void netif_carrier_off(struct net_device *dev);
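
/*
 * Illustrative sketch only: a driver's link interrupt or phylib adjust_link
 * callback typically toggles carrier based on the PHY state, e.g.
 *
 *	if (phydev->link)
 *		netif_carrier_on(dev);
 *	else
 *		netif_carrier_off(dev);
 */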

/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 *
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 * Check if the device is in the dormant state.
 */
static inline bool netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}


/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if carrier is operational
 */
static inline bool netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline bool netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

void netif_device_detach(struct net_device *dev);
void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
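
/*
 * Illustrative sketch only: drivers typically feed a module parameter into
 * netif_msg_init() at probe time and gate their messages with the
 * netif_msg_*() macros.  "debug" and "priv" are hypothetical driver symbols.
 *
 *	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV | NETIF_MSG_LINK);
 *
 *	if (netif_msg_link(priv))
 *		netdev_info(dev, "link is up\n");
 */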

static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
	bool ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}

/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 * Get network device transmit lock
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}

#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}

static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_nested(struct net_device *dev)
{
	spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}

/*
 * dev_addrs walker. Should be used only for read access. Call with
 * rcu_read_lock held.
 */
#define for_each_dev_addr(dev, ha) \
		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)

/* These functions live elsewhere (drivers/net/net_init.c, but related) */

void ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
				    void (*setup)(struct net_device *),
				    unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
	alloc_netdev_mqs(sizeof_priv, name, setup, count, count)

int register_netdev(struct net_device *dev);
void unregister_netdev(struct net_device *dev);
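
/*
 * Illustrative sketch only: the usual life cycle of a driver-created device
 * is alloc_netdev() (or alloc_etherdev() from etherdevice.h), then
 * register_netdev(), and eventually unregister_netdev() followed by
 * free_netdev().  "my_setup" and "struct my_priv" are hypothetical.
 *
 *	struct net_device *dev;
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "mydev%d", my_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	if (register_netdev(dev)) {
 *		free_netdev(dev);
 *		return -ENODEV;
 *	}
 */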

/* General hardware address lists handling functions */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
		   struct netdev_hw_addr_list *from_list, int addr_len);
void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
		      struct netdev_hw_addr_list *from_list, int addr_len);
void __hw_addr_init(struct netdev_hw_addr_list *list);

/* Functions used for device addresses handling */
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type);
void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);

/* Functions used for unicast addresses handling */
int dev_uc_add(struct net_device *dev, const unsigned char *addr);
int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_uc_del(struct net_device *dev, const unsigned char *addr);
int dev_uc_sync(struct net_device *to, struct net_device *from);
int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_uc_unsync(struct net_device *to, struct net_device *from);
void dev_uc_flush(struct net_device *dev);
void dev_uc_init(struct net_device *dev);

/* Functions used for multicast addresses handling */
int dev_mc_add(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_mc_del(struct net_device *dev, const unsigned char *addr);
int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_sync(struct net_device *to, struct net_device *from);
int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_mc_unsync(struct net_device *to, struct net_device *from);
void dev_mc_flush(struct net_device *dev);
void dev_mc_init(struct net_device *dev);

/* Functions used for secondary unicast and multicast support */
void dev_set_rx_mode(struct net_device *dev);
void __dev_set_rx_mode(struct net_device *dev);
int dev_set_promiscuity(struct net_device *dev, int inc);
int dev_set_allmulti(struct net_device *dev, int inc);
void netdev_state_change(struct net_device *dev);
void netdev_notify_peers(struct net_device *dev);
void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
void dev_load(struct net *net, const char *name);
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage);
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats);

extern int		netdev_max_backlog;
extern int		netdev_tstamp_prequeue;
extern int		weight_p;
extern int		bpf_jit_enable;

bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
						     struct list_head **iter);

/* iterate through upper list, must be called under RCU read lock */
#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
	for (iter = &(dev)->all_adj_list.upper, \
	     updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
	     updev; \
	     updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter);
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter);

#define netdev_for_each_lower_private(dev, priv, iter) \
	for (iter = (dev)->adj_list.lower.next, \
	     priv = netdev_lower_get_next_private(dev, &(iter)); \
	     priv; \
	     priv = netdev_lower_get_next_private(dev, &(iter)))

#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
	for (iter = &(dev)->adj_list.lower, \
	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
	     priv; \
	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)))

void *netdev_adjacent_get_private(struct list_head *adj_list);
void *netdev_lower_get_first_private_rcu(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev);
int netdev_master_upper_dev_link_private(struct net_device *dev,
					 struct net_device *upper_dev,
					 void *private);
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev);
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev);
int skb_checksum_help(struct sk_buff *skb);
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path);
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features);

static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	return __skb_gso_segment(skb, features, true);
}
__be16 skb_network_protocol(struct sk_buff *skb);

static inline bool can_checksum_protocol(netdev_features_t features,
					 __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_V4_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_V6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
void net_enable_timestamp(void);
void net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif

int netdev_class_create_file_ns(struct class_attribute *class_attr,
				const void *ns);
void netdev_class_remove_file_ns(struct class_attribute *class_attr,
				 const void *ns);

static inline int netdev_class_create_file(struct class_attribute *class_attr)
{
	return netdev_class_create_file_ns(class_attr, NULL);
}

static inline void netdev_class_remove_file(struct class_attribute *class_attr)
{
	netdev_class_remove_file_ns(class_attr, NULL);
}

extern struct kobj_ns_type_operations net_ns_type_operations;

const char *netdev_drivername(const struct net_device *dev);

void linkwatch_run_queue(void);

static inline netdev_features_t netdev_get_wanted_features(
	struct net_device *dev)
{
	return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask);

/* Allow TSO being used on stacked device :
 * Performing the GSO segmentation before last device
 * is a performance improvement.
 */
static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
							netdev_features_t mask)
{
	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
}
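/*
 * Illustrative sketch: an upper device (bond/team/bridge style) computing
 * its features from its lower devices might fold each lower device in with
 * netdev_increment_features() and then restore the TSO bits, e.g.:
 *
 *	features = netdev_increment_features(features,
 *					     lower->features, mask);
 *	features = netdev_add_tso_features(features, mask);
 *
 * "lower" and "mask" are placeholders for the driver's own iteration state.
 */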

int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);

void netif_stacked_transfer_operstate(const struct net_device *rootdev,
					struct net_device *dev);

netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
					 const struct net_device *dev);
static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	return netif_skb_dev_features(skb, skb->dev);
}

static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;

	/* check flags correspondence */
	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));

	return (features & feature) == feature;
}

static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}

static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
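/*
 * Taken together, netif_needs_gso() and skb_gso_segment() drive the
 * software-GSO fallback applied on transmit; an illustrative (simplified)
 * pattern, a sketch of what net/core/dev.c does rather than a verbatim
 * copy, is:
 *
 *	if (netif_needs_gso(skb, features)) {
 *		struct sk_buff *segs = skb_gso_segment(skb, features);
 *
 *		if (IS_ERR(segs))
 *			goto drop;
 *		if (segs)
 *			... transmit the segment list instead of skb ...
 *	}
 */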

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
					int pulled_hlen, u16 mac_offset,
					int mac_len)
{
	skb->protocol = protocol;
	skb->encapsulation = 1;
	skb_push(skb, pulled_hlen);
	skb_reset_transport_header(skb);
	skb->mac_header = mac_offset;
	skb->network_header = skb->mac_header + mac_len;
	skb->mac_len = mac_len;
}

static inline bool netif_is_macvlan(struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN;
}

static inline bool netif_is_bond_master(struct net_device *dev)
{
	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_is_bond_slave(struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

extern struct pernet_operations __net_initdata loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (dev->reg_state != NETREG_REGISTERED)
		return "(unregistered net_device)";
	return dev->name;
}

__printf(3, 4)
int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...);
__printf(2, 3)
int netdev_emerg(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
int netdev_alert(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
int netdev_crit(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
int netdev_err(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
int netdev_warn(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
int netdev_notice(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
int netdev_info(const struct net_device *dev, const char *format, ...);
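/*
 * These mirror the dev_emerg()..dev_info() helpers but identify the
 * network interface (and its parent device) in the message prefix.  An
 * illustrative driver call, with placeholder names, would be:
 *
 *	netdev_warn(netdev, "link down, resetting (status %#x)\n", status);
 */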

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)
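/*
 * The alias lets a request for a not-yet-present interface auto-load the
 * module that provides it (dev_load() requests "netdev-<name>").  For
 * example, a tunnel driver that creates a default "sit0" device would add:
 *
 *	MODULE_ALIAS_NETDEV("sit0");
 *
 * The "sit0" name is only an illustration of a typical user.
 */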

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
	0;							\
})
#endif
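/*
 * With CONFIG_DYNAMIC_DEBUG, netdev_dbg() sites are compiled in but off by
 * default and can be enabled at run time through the dynamic debug control
 * file, e.g. (illustrative shell command, "mydriver.c" is a placeholder):
 *
 *	echo 'file mydriver.c +p' > /sys/kernel/debug/dynamic_debug/control
 */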

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif

/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)
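/*
 * Illustrative use: report a driver-internal inconsistency with a backtrace
 * while still identifying the interface, e.g.:
 *
 *	netdev_WARN(dev, "unexpected TX ring state %d\n", state);
 *
 * "state" is a placeholder variable.
 */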

/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {					  			\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)
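/*
 * The netif_emerg()..netif_info() wrappers above add per-driver message
 * level filtering on top of netdev_printk().  A typical (illustrative)
 * driver pattern is a private struct carrying a "u32 msg_enable" field,
 * initialized via netif_msg_init(), with calls such as:
 *
 *	netif_err(priv, tx_err, priv->netdev, "TX timeout on queue %u\n", q);
 *
 * where "priv", "netdev" and "q" are placeholders for driver-private state.
 */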

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *             sure which should go first, but I bet it won't make much
 *             difference if we are running VLANs.  The good news is that
 *             this protocol won't be in the list unless compiled in, so
 *             the average user (w/out VLANs) will not be adversely affected.
 *             --BLG
 *
 *		0800	IP
 *		8100    802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
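/*
 * For reference, the core hashes a packet_type into this table using the
 * low nibble of the protocol number, roughly (sketch of net/core/dev.c,
 * not a definition from this header):
 *
 *	list_add_rcu(&pt->list,
 *		     &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]);
 *
 * with ETH_P_ALL handlers kept on a separate ptype_all list.
 */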

#endif	/* _LINUX_NETDEVICE_H */