/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 * 		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *              			to 2 if register_netdev gets called
 *              			before net_dev_init & also removed a
 *              			few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	: 	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	: 	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki   :	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *              Pekka Riikonen  :	Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *              			indefinitely on dev->refcnt
 * 		J Hadi Salim	:	- Backlog queue sampling
 *				        - netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/net_tstamp.h>
#include <linux/static_key.h>
#include <net/flow_keys.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *             sure which should go first, but I bet it won't make much
 *             difference if we are running VLANs.  The good news is that
 *             this protocol won't be in the list unless compiled in, so
 *             the average user (w/out VLANs) will not be adversely affected.
 *             --BLG
 *
 *		0800	IP
 *		8100    802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);

	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
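
/*
 * Illustrative sketch (not part of the original file): a protocol module
 * would typically declare and register a handler roughly like this, where
 * my_rcv() and my_packet_type are hypothetical names:
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev);
 *
 *	static struct packet_type my_packet_type __read_mostly = {
 *		.type	= cpu_to_be16(ETH_P_IP),
 *		.func	= my_rcv,
 *	};
 *
 *	dev_add_pack(&my_packet_type);
 *
 * The matching dev_remove_pack(&my_packet_type) is used on module unload.
 */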

/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *      The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);
	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 * 	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq 	= s[i].map.irq;
			dev->base_addr 	= s[i].map.base_addr;
			dev->mem_start 	= s[i].map.mem_start;
			dev->mem_end 	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 * 	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 * 	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
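
/*
 * Usage sketch (illustrative): an RCU reader takes no reference, so the
 * pointer is only valid inside the read-side critical section:
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "eth0");
 *	if (dev)
 *		... use dev, but do not sleep or store it ...
 *	rcu_read_unlock();
 */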

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
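
/*
 * Usage sketch (illustrative): the returned device carries a reference
 * that the caller must drop with dev_put() when done:
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */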

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;
	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
				    unsigned short mask)
{
	struct net_device *dev, *ret;
	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
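
/*
 * For example (illustrative): "eth0" and "wlan%d" are accepted, while "",
 * ".", "..", "a/b" and names containing whitespace are rejected.
 */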

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/*  avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
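
/*
 * Illustrative sketch: a driver that has just allocated a net_device can
 * pick a unit-numbered name before registration ("my%d" is a hypothetical
 * format string; a negative return value is an errno):
 *
 *	err = dev_alloc_name(dev, "my%d");
 *	if (err < 0)
 *		goto out_free;
 */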

static int dev_get_valid_name(struct net_device *dev, const char *name)
{
	struct net *net;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name(dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(dev, newname);
	if (err < 0)
		return err;
rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return ret;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device,
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);
int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;
	int no_module;
	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();
	no_module = !dev;
	if (no_module && capable(CAP_NET_ADMIN))
		no_module = request_module("netdev-%s", name);
	if (no_module && capable(CAP_SYS_MODULE)) {
		if (!request_module("%s", name))
			pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
			       name);
	}
}
EXPORT_SYMBOL(dev_load);
static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;
	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);
	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);
	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);
	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		net_dmaengine_get();
		dev_set_rx_mode(dev);
		dev_activate(dev);
	}
	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;
	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, unreg_list) {
		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
		clear_bit(__LINK_STATE_START, &dev->state);
		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_clear_bit(); /* Commit netif_running(). */
	}
	dev_deactivate_many(head);
	list_for_each_entry(dev, head, unreg_list) {
		const struct net_device_ops *ops = dev->netdev_ops;
		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);
	return retval;
}

static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(tmp_list);
	list_for_each_entry_safe(dev, tmp, head, unreg_list)
		if (!(dev->flags & IFF_UP))
			list_move(&dev->unreg_list, &tmp_list);

	__dev_close_many(head);
	list_for_each_entry(dev, head, unreg_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
	}
	/* rollback_registered_many needs the complete original list */
	list_splice(&tmp_list, head);
	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);
		list_add(&dev->unreg_list, &single);
		dev_close_many(&single);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	/*
	 * If we're trying to disable lro on a vlan device
	 * use the underlying physical device instead
	 */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);
	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 * 	When registered all registration and up events are replayed
 *	to the new notifier to allow the device to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;
			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;
			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}
outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
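
/*
 * Illustrative sketch of a caller (my_netdev_event and my_notifier are
 * hypothetical names).  In this kernel the notifier's data pointer is the
 * struct net_device itself:
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_notifier = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_notifier);
 */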

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *      @val: value passed unmodified to notifier function
 *      @dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	WARN_ON(in_interrupt());
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)			\
	if (static_key_false(&netstamp_needed)) {		\
		if ((COND) && !(SKB)->tstamp.tv64)	\
			__net_timestamp(SKB);		\
	}						\
static int net_hwtstamp_validate(struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	enum hwtstamp_tx_types tx_type;
	enum hwtstamp_rx_filters rx_filter;
	int tx_type_valid = 0;
	int rx_filter_valid = 0;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	tx_type = cfg.tx_type;
	rx_filter = cfg.rx_filter;

	switch (tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_ONESTEP_SYNC:
		tx_type_valid = 1;
		break;
	}

	switch (rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		rx_filter_valid = 1;
		break;
	}

	if (!tx_type_valid || !rx_filter_valid)
		return -ERANGE;

	return 0;
}

static inline bool is_skb_forwardable(struct net_device *dev,
				      struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	skb_orphan(skb);
	nf_reset(skb);
	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb_set_dev(skb, dev);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
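
/*
 * Illustrative sketch: a virtual device pair (a veth-like driver) can hand
 * a packet to its peer from ndo_start_xmit(); my_xmit() and my_get_peer()
 * are hypothetical names:
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct net_device *peer = my_get_peer(dev);
 *
 *		dev_forward_skb(peer, skb);
 *		return NETDEV_TX_OK;
 *	}
 */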

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

L
Linus Torvalds 已提交
1616 1617 1618 1619 1620
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

1621
static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
L
Linus Torvalds 已提交
1622 1623
{
	struct packet_type *ptype;
1624 1625
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;
1626

L
Linus Torvalds 已提交
1627 1628 1629 1630 1631 1632 1633 1634
	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
1635 1636 1637 1638 1639 1640 1641
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
L
Linus Torvalds 已提交
1642 1643 1644
			if (!skb2)
				break;

1645 1646
			net_timestamp_set(skb2);

L
Linus Torvalds 已提交
1647 1648 1649 1650
			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
1651
			skb_reset_mac_header(skb2);
L
Linus Torvalds 已提交
1652

1653
			if (skb_network_header(skb2) < skb2->data ||
1654
			    skb2->network_header > skb2->tail) {
L
Linus Torvalds 已提交
1655
				if (net_ratelimit())
1656 1657 1658
					pr_crit("protocol %04x is buggy, dev %s\n",
						ntohs(skb2->protocol),
						dev->name);
1659
				skb_reset_network_header(skb2);
L
Linus Torvalds 已提交
1660 1661
			}

1662
			skb2->transport_header = skb2->network_header;
L
Linus Torvalds 已提交
1663
			skb2->pkt_type = PACKET_OUTGOING;
1664
			pt_prev = ptype;
L
Linus Torvalds 已提交
1665 1666
		}
	}
1667 1668
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
L
Linus Torvalds 已提交
1669 1670 1671
	rcu_read_unlock();
}

1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683
/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid nothing can be done so disable priority mappings. If is
 * expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
E
Eric Dumazet 已提交
1684
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1685 1686 1687 1688 1689 1690
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
1691
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues)
			qdisc_reset_all_tx_gt(dev, txq);
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);

#ifdef CONFIG_RPS
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

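/* Append a qdisc to this CPU's softnet output queue and raise
 * NET_TX_SOFTIRQ so that net_tx_action() will run it.
 */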
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

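/* Free an skb from hardirq context: the final reference is queued on this
 * CPU's completion queue and actually freed later from the TX softirq.
 */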
void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

/**
 * skb_set_dev - assign a new device to a buffer
 * @skb: buffer for the new device
 * @dev: network device
 *
 * If an skb is owned by a device already, we have to reset
 * all data private to the namespace a device belongs to
 * before assigning it a new device.
 */
#ifdef CONFIG_NET_NS
void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb_dst_drop(skb);
	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
		secpath_reset(skb);
		nf_reset(skb);
		skb_init_secmark(skb);
		skb->mark = 0;
		skb->priority = 0;
		skb->nf_trace = 0;
		skb->ipvs_property = 0;
#ifdef CONFIG_NET_SCHED
		skb->tc_index = 0;
#endif
	}
	skb->dev = dev;
}
EXPORT_SYMBOL(skb_set_dev);
#endif /* CONFIG_NET_NS */

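/* Warn, naming the driver and dumping the offload-related skb fields, when
 * a bogus GSO/checksum offload setup is detected.
 */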
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features = 0;
	struct net_device *dev = skb->dev;
	const char *driver = "";

	if (dev && dev->dev.parent)
		driver = dev_driver_string(dev->dev.parent);

	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     driver, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);

/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb,
	netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int vlan_depth = ETH_HLEN;
	int err;

	while (type == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vh;

		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
			return ERR_PTR(-EINVAL);

		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
		type = vh->h_vlan_encapsulated_proto;
		vlan_depth += VLAN_HLEN;
	}

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		skb_warn_bad_offload(skb);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and allows mapping all the memory.
 * 2. No high memory really exists on this machine.
 */

static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;
	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));
			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *	@features: device features as applicable to this skb
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs;

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

/*
 * Try to orphan skb early, right before transmission by the device.
 * We cannot orphan skb if tx timestamp is requested or the sk-reference
 * is needed on driver level for other reasons, e.g. see net/can/raw.c
 */
static inline void skb_orphan_try(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	if (sk && !skb_shinfo(skb)->tx_flags) {
		/* skb_tx_hash() won't be able to get sk.
		 * We copy sk_hash into skb->rxhash
		 */
		if (!skb->rxhash)
			skb->rxhash = sk->sk_hash;
		skb_orphan(skb);
	}
}

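/* True if the feature set can checksum the given protocol in hardware,
 * either generically or via a protocol-specific offload.
 */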
static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_V4_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_V6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

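/* Strip checksum and scatter/gather features that cannot be used for this
 * skb, e.g. when the protocol is not offloadable or the data lives in
 * highmem the device cannot DMA from.
 */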
static netdev_features_t harmonize_features(struct sk_buff *skb,
	__be16 protocol, netdev_features_t features)
{
	if (!can_checksum_protocol(features, protocol)) {
		features &= ~NETIF_F_ALL_CSUM;
		features &= ~NETIF_F_SG;
	} else if (illegal_highdma(skb->dev, skb)) {
		features &= ~NETIF_F_SG;
	}

	return features;
}

netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;
	netdev_features_t features = skb->dev->features;

	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
2151 2152 2153
	} else if (!vlan_tx_tag_present(skb)) {
		return harmonize_features(skb, protocol, features);
	}
2154

2155
	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
2156 2157 2158 2159 2160

	if (protocol != htons(ETH_P_8021Q)) {
		return harmonize_features(skb, protocol, features);
	} else {
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
2161
				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
2162 2163
		return harmonize_features(skb, protocol, features);
	}
2164
}
2165
EXPORT_SYMBOL(netif_skb_features);
2166

2167 2168 2169 2170 2171 2172 2173 2174
/*
 * Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG, or if
 *	   at least one of fragments is in highmem and device does not
 *	   support DMA from it.
 */
static inline int skb_needs_linearize(struct sk_buff *skb,
2175
				      int features)
2176
{
2177 2178 2179
	return skb_is_nonlinear(skb) &&
			((skb_has_frag_list(skb) &&
				!(features & NETIF_F_FRAGLIST)) ||
2180
			(skb_shinfo(skb)->nr_frags &&
2181
				!(features & NETIF_F_SG)));
2182 2183
}

2184 2185
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
2186
{
2187
	const struct net_device_ops *ops = dev->netdev_ops;
2188
	int rc = NETDEV_TX_OK;
2189
	unsigned int skb_len;
2190

2191
	if (likely(!skb->next)) {
2192
		netdev_features_t features;
2193

2194
		/*
L
2196 2197
		 * its hot in this cpu cache
		 */
E
			skb_dst_drop(skb);

2201 2202 2203
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

E
2205

2206 2207
		features = netif_skb_features(skb);

2208
		if (vlan_tx_tag_present(skb) &&
2209
		    !(features & NETIF_F_HW_VLAN_TX)) {
2210 2211 2212 2213 2214 2215 2216
			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
			if (unlikely(!skb))
				goto out;

			skb->vlan_tci = 0;
		}

		if (netif_needs_gso(skb, features)) {
			if (unlikely(dev_gso_segment(skb, features)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		} else {
			if (skb_needs_linearize(skb, features) &&
			    __skb_linearize(skb))
				goto out_kfree_skb;

			/* If packet is not checksummed and device does not
			 * support checksumming for this protocol, complete
			 * checksumming here.
			 */
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				skb_set_transport_header(skb,
					skb_checksum_start_offset(skb));
				if (!(features & NETIF_F_ALL_CSUM) &&
				     skb_checksum_help(skb))
					goto out_kfree_skb;
			}
		}

		skb_len = skb->len;
		rc = ops->ndo_start_xmit(skb, dev);
		trace_net_dev_xmit(skb, rc, dev, skb_len);
		if (rc == NETDEV_TX_OK)
			txq_trans_update(txq);
		return rc;
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;

		/*
		 * If device doesn't need nskb->dst, release it right now while
		 * it's hot in this cpu cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(nskb);

		skb_len = nskb->len;
		rc = ops->ndo_start_xmit(nskb, dev);
		trace_net_dev_xmit(nskb, rc, dev, skb_len);
		if (unlikely(rc != NETDEV_TX_OK)) {
			if (rc & ~NETDEV_TX_MASK)
				goto out_kfree_gso_skb;
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		txq_trans_update(txq);
		if (unlikely(netif_xmit_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

out_kfree_gso_skb:
	if (likely(skb->next == NULL))
		skb->destructor = DEV_GSO_CB(skb)->destructor;
out_kfree_skb:
	kfree_skb(skb);
out:
	return rc;
}

static u32 hashrnd __read_mostly;

/*
 * Returns a Tx hash based on the given packet descriptor and a Tx queue
 * count to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16) skb->protocol ^ skb->rxhash;
	hash = jhash_1word(hash, hashrnd);

	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);

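/* Validate a driver-chosen queue index against the real number of TX
 * queues, falling back to queue 0 with a rate-limited warning when it is
 * out of range.
 */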
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		if (net_ratelimit()) {
			pr_warn("%s selects TX queue %d, but real number of TX queues is %d\n",
				dev->name, queue_index,
				dev->real_num_tx_queues);
		}
		return 0;
	}
	return queue_index;
}

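/* Pick a TX queue from the XPS map of the current CPU, hashing the flow
 * when the map holds more than one queue; returns -1 if XPS is not
 * configured for this device.
 */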
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else {
				u32 hash;
				if (skb->sk && skb->sk->sk_hash)
					hash = skb->sk->sk_hash;
				else
					hash = (__force u16) skb->protocol ^
					    skb->rxhash;
				hash = jhash_1word(hash, hashrnd);
				queue_index = map->queues[
				    ((u64)hash * map->len) >> 32];
			}
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}

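/* Select the TX queue for an skb: use the driver's ndo_select_queue() if
 * provided, otherwise fall back to the socket's cached mapping, XPS or the
 * flow hash, caching the result on the socket when it is safe to do so.
 */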
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
2376
	int queue_index;
2377
	const struct net_device_ops *ops = dev->netdev_ops;
2378

2379 2380 2381
	if (dev->real_num_tx_queues == 1)
		queue_index = 0;
	else if (ops->ndo_select_queue) {
2382 2383 2384 2385 2386
		queue_index = ops->ndo_select_queue(dev, skb);
		queue_index = dev_cap_txqueue(dev, queue_index);
	} else {
		struct sock *sk = skb->sk;
		queue_index = sk_tx_queue_get(sk);
2387

2388 2389 2390
		if (queue_index < 0 || skb->ooo_okay ||
		    queue_index >= dev->real_num_tx_queues) {
			int old_index = queue_index;
2391

T
Tom Herbert 已提交
2392 2393 2394
			queue_index = get_xps_queue(dev, skb);
			if (queue_index < 0)
				queue_index = skb_tx_hash(dev, skb);
2395 2396 2397 2398

			if (queue_index != old_index && sk) {
				struct dst_entry *dst =
				    rcu_dereference_check(sk->sk_dst_cache, 1);
E
Eric Dumazet 已提交
2399 2400 2401 2402

				if (dst && skb_dst(skb) == dst)
					sk_tx_queue_set(sk, queue_index);
			}
2403 2404
		}
	}
2405

2406 2407
	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
2408 2409
}

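/* Enqueue an skb on a qdisc and run the qdisc, using the busylock
 * heuristic to keep contended senders off the qdisc root lock.
 */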
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
E
Eric Dumazet 已提交
2415
	bool contended;
2416 2417
	int rc;

E
Eric Dumazet 已提交
2418 2419
	qdisc_skb_cb(skb)->pkt_len = skb->len;
	qdisc_calculate_pkt_len(skb, q);
2420 2421 2422 2423 2424 2425
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
	 * and dequeue packets faster.
	 */
E
Eric Dumazet 已提交
2426
	contended = qdisc_is_running(q);
2427 2428 2429
	if (unlikely(contended))
		spin_lock(&q->busylock);

2430 2431 2432 2433 2434
	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2435
		   qdisc_run_begin(q)) {
2436 2437 2438 2439 2440
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
E
Eric Dumazet 已提交
2441 2442
		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
			skb_dst_force(skb);
2443 2444 2445

		qdisc_bstats_update(q, skb);

2446 2447 2448 2449 2450
		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
2451
			__qdisc_run(q);
2452
		} else
2453
			qdisc_run_end(q);
2454 2455 2456

		rc = NET_XMIT_SUCCESS;
	} else {
E
Eric Dumazet 已提交
2457
		skb_dst_force(skb);
E
Eric Dumazet 已提交
2458
		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2459 2460 2461 2462 2463 2464 2465
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
2466 2467
	}
	spin_unlock(root_lock);
2468 2469
	if (unlikely(contended))
		spin_unlock(&q->busylock);
2470 2471 2472
	return rc;
}

2473 2474 2475
#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
static void skb_update_prio(struct sk_buff *skb)
{
I
Igor Maravic 已提交
2476
	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2477 2478 2479 2480 2481 2482 2483 2484

	if ((!skb->priority) && (skb->sk) && map)
		skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
}
#else
#define skb_update_prio(skb)
#endif

2485
static DEFINE_PER_CPU(int, xmit_recursion);
2486
#define RECURSION_LIMIT 10
2487

2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512
/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */
L
Linus Torvalds 已提交
2513 2514 2515
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
2516
	struct netdev_queue *txq;
L
Linus Torvalds 已提交
2517 2518 2519
	struct Qdisc *q;
	int rc = -ENOMEM;

2520 2521
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
L
Linus Torvalds 已提交
2522
	 */
2523
	rcu_read_lock_bh();
L
Linus Torvalds 已提交
2524

2525 2526
	skb_update_prio(skb);

2527
	txq = dev_pick_tx(dev, skb);
2528
	q = rcu_dereference_bh(txq->qdisc);
2529

L
Linus Torvalds 已提交
2530
#ifdef CONFIG_NET_CLS_ACT
E
Eric Dumazet 已提交
2531
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
L
Linus Torvalds 已提交
2532
#endif
2533
	trace_net_dev_queue(skb);
L
Linus Torvalds 已提交
2534
	if (q->enqueue) {
2535
		rc = __dev_xmit_skb(skb, q, dev, txq);
2536
		goto out;
L
Linus Torvalds 已提交
2537 2538 2539 2540 2541
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

H
Herbert Xu 已提交
2542 2543
	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
L
Linus Torvalds 已提交
2544 2545 2546 2547 2548 2549 2550 2551 2552 2553
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shoot the lock. It is not prone to deadlocks.
	   Either shoot the noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

2554
		if (txq->xmit_lock_owner != cpu) {
L
Linus Torvalds 已提交
2555

2556 2557 2558
			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
				goto recursion_alert;

2559
			HARD_TX_LOCK(dev, txq, cpu);
L
Linus Torvalds 已提交
2560

2561
			if (!netif_xmit_stopped(txq)) {
2562
				__this_cpu_inc(xmit_recursion);
2563
				rc = dev_hard_start_xmit(skb, dev, txq);
2564
				__this_cpu_dec(xmit_recursion);
2565
				if (dev_xmit_complete(rc)) {
2566
					HARD_TX_UNLOCK(dev, txq);
L
Linus Torvalds 已提交
2567 2568 2569
					goto out;
				}
			}
2570
			HARD_TX_UNLOCK(dev, txq);
L
Linus Torvalds 已提交
2571
			if (net_ratelimit())
2572 2573
				pr_crit("Virtual device %s asks to queue packet!\n",
					dev->name);
L
Linus Torvalds 已提交
2574 2575
		} else {
			/* Recursion is detected! It is possible,
2576 2577 2578
			 * unfortunately
			 */
recursion_alert:
L
Linus Torvalds 已提交
2579
			if (net_ratelimit())
2580 2581
				pr_crit("Dead loop on virtual device %s, fix it urgently!\n",
					dev->name);
L
Linus Torvalds 已提交
2582 2583 2584 2585
		}
	}

	rc = -ENETDOWN;
2586
	rcu_read_unlock_bh();
L
Linus Torvalds 已提交
2587 2588 2589 2590

	kfree_skb(skb);
	return rc;
out:
2591
	rcu_read_unlock_bh();
L
Linus Torvalds 已提交
2592 2593
	return rc;
}
E
Eric Dumazet 已提交
2594
EXPORT_SYMBOL(dev_queue_xmit);
L
Linus Torvalds 已提交
2595 2596 2597 2598 2599 2600


/*=======================================================================
			Receiver routines
  =======================================================================*/

2601
int netdev_max_backlog __read_mostly = 1000;
E
Eric Dumazet 已提交
2602
int netdev_tstamp_prequeue __read_mostly = 1;
2603 2604
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */
L
Linus Torvalds 已提交
2605

E
Eric Dumazet 已提交
2606 2607 2608 2609 2610 2611 2612 2613
/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

T
Tom Herbert 已提交
2614
/*
2615
 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2616 2617 2618
 * and src/dst port numbers.  Sets rxhash in skb to non-zero hash value
 * on success, zero indicates no valid hash.  Also, sets l4_rxhash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
T
Tom Herbert 已提交
2619
 */
2620
void __skb_get_rxhash(struct sk_buff *skb)
T
Tom Herbert 已提交
2621
{
2622 2623
	struct flow_keys keys;
	u32 hash;
2624

2625 2626
	if (!skb_flow_dissect(skb, &keys))
		return;
2627

2628 2629 2630 2631
	if (keys.ports) {
		if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
			swap(keys.port16[0], keys.port16[1]);
		skb->l4_rxhash = 1;
T
Tom Herbert 已提交
2632 2633
	}

E
Eric Dumazet 已提交
2634
	/* get a consistent hash (same value on both flow directions) */
2635 2636
	if ((__force u32)keys.dst < (__force u32)keys.src)
		swap(keys.dst, keys.src);
T
Tom Herbert 已提交
2637

2638 2639 2640
	hash = jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src,
			    (__force u32)keys.ports, hashrnd);
2641 2642 2643
	if (!hash)
		hash = 1;

2644
	skb->rxhash = hash;
2645 2646 2647 2648 2649 2650
}
EXPORT_SYMBOL(__skb_get_rxhash);

#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
E
Eric Dumazet 已提交
2651
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
2652 2653
EXPORT_SYMBOL(rps_sock_flow_table);

2654
struct static_key rps_needed __read_mostly;
2655

2656 2657 2658 2659
static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
2660
	if (next_cpu != RPS_NO_CPU) {
2661 2662 2663 2664 2665 2666 2667 2668 2669
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
2670 2671
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb->rxhash & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
2694
			per_cpu(softnet_data, next_cpu).input_queue_head;
2695 2696
	}

2697
	rflow->cpu = next_cpu;
2698 2699 2700
	return rflow;
}

2701 2702 2703 2704 2705 2706 2707 2708 2709
/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	struct netdev_rx_queue *rxqueue;
E
Eric Dumazet 已提交
2710
	struct rps_map *map;
2711 2712 2713 2714 2715 2716 2717
	struct rps_dev_flow_table *flow_table;
	struct rps_sock_flow_table *sock_flow_table;
	int cpu = -1;
	u16 tcpu;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);
2718 2719 2720 2721 2722
		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
2723 2724 2725 2726 2727 2728
			goto done;
		}
		rxqueue = dev->_rx + index;
	} else
		rxqueue = dev->_rx;

E
Eric Dumazet 已提交
2729 2730
	map = rcu_dereference(rxqueue->rps_map);
	if (map) {
2731
		if (map->len == 1 &&
2732
		    !rcu_access_pointer(rxqueue->rps_flow_table)) {
2733 2734 2735 2736 2737
			tcpu = map->cpus[0];
			if (cpu_online(tcpu))
				cpu = tcpu;
			goto done;
		}
2738
	} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
2739
		goto done;
2740
	}
2741

2742
	skb_reset_network_header(skb);
2743 2744 2745
	if (!skb_get_rxhash(skb))
		goto done;

T
Tom Herbert 已提交
2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		u16 next_cpu;
		struct rps_dev_flow *rflow;

		rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
		tcpu = rflow->cpu;

		next_cpu = sock_flow_table->ents[skb->rxhash &
		    sock_flow_table->mask];

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (equal to RPS_NO_CPU).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2772 2773 2774
		      rflow->last_qtail)) >= 0))
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);

T
Tom Herbert 已提交
2775 2776 2777 2778 2779 2780 2781
		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

T
Tom Herbert 已提交
2782
	if (map) {
T
Tom Herbert 已提交
2783
		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
T
Tom Herbert 已提交
2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794

		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}

2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834
#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = ACCESS_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);

#endif /* CONFIG_RFS_ACCEL */

T
Tom Herbert 已提交
2835
/* Called from hardirq (IPI) context */
E
Eric Dumazet 已提交
2836
static void rps_trigger_softirq(void *data)
T
Tom Herbert 已提交
2837
{
E
Eric Dumazet 已提交
2838 2839
	struct softnet_data *sd = data;

E
Eric Dumazet 已提交
2840
	____napi_schedule(sd, &sd->backlog);
C
Changli Gao 已提交
2841
	sd->received_rps++;
T
Tom Herbert 已提交
2842
}
E
Eric Dumazet 已提交
2843

T
Tom Herbert 已提交
2844
#endif /* CONFIG_RPS */
T
Tom Herbert 已提交
2845

E
Eric Dumazet 已提交
2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866
/*
 * Check if this softnet_data structure is another cpu one
 * If yes, queue it to our IPI list and return 1
 * If no, return 0
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = &__get_cpu_var(softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}

T
Tom Herbert 已提交
2867 2868 2869 2870
/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
T
Tom Herbert 已提交
2871 2872
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
T
Tom Herbert 已提交
2873
{
E
Eric Dumazet 已提交
2874
	struct softnet_data *sd;
T
Tom Herbert 已提交
2875 2876
	unsigned long flags;

E
Eric Dumazet 已提交
2877
	sd = &per_cpu(softnet_data, cpu);
T
Tom Herbert 已提交
2878 2879 2880

	local_irq_save(flags);

E
Eric Dumazet 已提交
2881
	rps_lock(sd);
2882 2883
	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
		if (skb_queue_len(&sd->input_pkt_queue)) {
T
Tom Herbert 已提交
2884
enqueue:
E
Eric Dumazet 已提交
2885
			__skb_queue_tail(&sd->input_pkt_queue, skb);
2886
			input_queue_tail_incr_save(sd, qtail);
E
Eric Dumazet 已提交
2887
			rps_unlock(sd);
2888
			local_irq_restore(flags);
T
Tom Herbert 已提交
2889 2890 2891
			return NET_RX_SUCCESS;
		}

2892 2893 2894 2895
		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
E
Eric Dumazet 已提交
2896
			if (!rps_ipi_queued(sd))
E
Eric Dumazet 已提交
2897
				____napi_schedule(sd, &sd->backlog);
T
Tom Herbert 已提交
2898 2899 2900 2901
		}
		goto enqueue;
	}

C
Changli Gao 已提交
2902
	sd->dropped++;
E
Eric Dumazet 已提交
2903
	rps_unlock(sd);
T
Tom Herbert 已提交
2904 2905 2906

	local_irq_restore(flags);

2907
	atomic_long_inc(&skb->dev->rx_dropped);
T
Tom Herbert 已提交
2908 2909 2910
	kfree_skb(skb);
	return NET_RX_DROP;
}
L
Linus Torvalds 已提交
2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928

/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
2929
	int ret;
L
Linus Torvalds 已提交
2930 2931 2932 2933 2934

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

2935
	net_timestamp_check(netdev_tstamp_prequeue, skb);
L
Linus Torvalds 已提交
2936

2937
	trace_netif_rx(skb);
E
Eric Dumazet 已提交
2938
#ifdef CONFIG_RPS
2939
	if (static_key_false(&rps_needed)) {
T
Tom Herbert 已提交
2940
		struct rps_dev_flow voidflow, *rflow = &voidflow;
2941 2942
		int cpu;

2943
		preempt_disable();
2944
		rcu_read_lock();
T
Tom Herbert 已提交
2945 2946

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
2947 2948
		if (cpu < 0)
			cpu = smp_processor_id();
T
Tom Herbert 已提交
2949 2950 2951

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

2952
		rcu_read_unlock();
2953
		preempt_enable();
2954 2955
	} else
#endif
T
Tom Herbert 已提交
2956 2957 2958 2959 2960
	{
		unsigned int qtail;
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
2961
	return ret;
L
Linus Torvalds 已提交
2962
}
E
Eric Dumazet 已提交
2963
EXPORT_SYMBOL(netif_rx);
L
Linus Torvalds 已提交
2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

2995
			WARN_ON(atomic_read(&skb->users));
2996
			trace_kfree_skb(skb, net_tx_action);
L
Linus Torvalds 已提交
2997 2998 2999 3000 3001
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
3002
		struct Qdisc *head;
L
Linus Torvalds 已提交
3003 3004 3005 3006

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
3007
		sd->output_queue_tailp = &sd->output_queue;
L
Linus Torvalds 已提交
3008 3009 3010
		local_irq_enable();

		while (head) {
3011 3012 3013
			struct Qdisc *q = head;
			spinlock_t *root_lock;

L
Linus Torvalds 已提交
3014 3015
			head = head->next_sched;

3016
			root_lock = qdisc_lock(q);
3017
			if (spin_trylock(root_lock)) {
3018 3019 3020
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
3021 3022
				qdisc_run(q);
				spin_unlock(root_lock);
L
Linus Torvalds 已提交
3023
			} else {
3024
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
3025
					      &q->state)) {
3026
					__netif_reschedule(q);
3027 3028 3029 3030 3031
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
L
Linus Torvalds 已提交
3032 3033 3034 3035 3036
			}
		}
	}
}

3037 3038
#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3039 3040 3041
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
3042
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3043
#endif
L
Linus Torvalds 已提交
3044 3045 3046 3047 3048 3049

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
 * a compare and 2 stores extra right now if we don't have it on
 * but have CONFIG_NET_CLS_ACT
L
Lucas De Marchi 已提交
3050 3051
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
L
Linus Torvalds 已提交
3052 3053
 *
 */
3054
static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
L
Linus Torvalds 已提交
3055 3056
{
	struct net_device *dev = skb->dev;
3057
	u32 ttl = G_TC_RTTL(skb->tc_verd);
3058 3059
	int result = TC_ACT_OK;
	struct Qdisc *q;
3060

3061 3062
	if (unlikely(MAX_RED_LOOP < ttl++)) {
		if (net_ratelimit())
3063 3064
			pr_warn("Redir loop detected Dropping packet (%d->%d)\n",
				skb->skb_iif, dev->ifindex);
3065 3066
		return TC_ACT_SHOT;
	}
L
Linus Torvalds 已提交
3067

3068 3069
	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
L
Linus Torvalds 已提交
3070

3071
	q = rxq->qdisc;
3072
	if (q != &noop_qdisc) {
3073
		spin_lock(qdisc_lock(q));
3074 3075
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
3076 3077
		spin_unlock(qdisc_lock(q));
	}
3078 3079 3080

	return result;
}
3081

3082 3083 3084 3085
static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
3086 3087 3088
	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);

	if (!rxq || rxq->qdisc == &noop_qdisc)
3089
		goto out;
L
Linus Torvalds 已提交
3090

3091 3092 3093
	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
L
Linus Torvalds 已提交
3094 3095
	}

3096
	switch (ing_filter(skb, rxq)) {
3097 3098 3099 3100 3101 3102 3103 3104 3105
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
L
Linus Torvalds 已提交
3106 3107 3108
}
#endif

3109 3110 3111 3112
/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
J
Jiri Pirko 已提交
3113
 *	@rx_handler_data: data pointer that is used by rx handler
3114 3115 3116 3117 3118 3119
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
3120 3121
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
3122 3123
 */
int netdev_rx_handler_register(struct net_device *dev,
J
Jiri Pirko 已提交
3124 3125
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
3126 3127 3128 3129 3130 3131
{
	ASSERT_RTNL();

	if (dev->rx_handler)
		return -EBUSY;

J
Jiri Pirko 已提交
3132
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);

/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{

	ASSERT_RTNL();
3151 3152
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3153 3154 3155
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);

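/* Core receive path: deliver the skb to taps, ingress classification, any
 * registered rx_handler and finally the matching protocol handlers.
 */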
static int __netif_receive_skb(struct sk_buff *skb)
L
Linus Torvalds 已提交
3157 3158
{
	struct packet_type *ptype, *pt_prev;
3159
	rx_handler_func_t *rx_handler;
D
David S. Miller 已提交
3160
	struct net_device *orig_dev;
3161
	struct net_device *null_or_dev;
3162
	bool deliver_exact = false;
L
Linus Torvalds 已提交
3163
	int ret = NET_RX_DROP;
A
Al Viro 已提交
3164
	__be16 type;
L
Linus Torvalds 已提交
3165

3166
	net_timestamp_check(!netdev_tstamp_prequeue, skb);
3167

3168
	trace_netif_receive_skb(skb);
3169

L
Linus Torvalds 已提交
3170
	/* if we've gotten here through NAPI, check netpoll */
3171
	if (netpoll_receive_skb(skb))
L
Linus Torvalds 已提交
3172 3173
		return NET_RX_DROP;

3174 3175
	if (!skb->skb_iif)
		skb->skb_iif = skb->dev->ifindex;
J
Joe Eykholt 已提交
3176
	orig_dev = skb->dev;
3177

3178
	skb_reset_network_header(skb);
3179
	skb_reset_transport_header(skb);
3180
	skb_reset_mac_len(skb);
L
Linus Torvalds 已提交
3181 3182 3183 3184 3185

	pt_prev = NULL;

	rcu_read_lock();

3186 3187 3188 3189
another_round:

	__this_cpu_inc(softnet_data.processed);

3190 3191 3192 3193 3194 3195
	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

L
Linus Torvalds 已提交
3196 3197 3198 3199 3200 3201 3202 3203
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
3204
		if (!ptype->dev || ptype->dev == skb->dev) {
3205
			if (pt_prev)
D
David S. Miller 已提交
3206
				ret = deliver_skb(skb, pt_prev, orig_dev);
L
Linus Torvalds 已提交
3207 3208 3209 3210 3211
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
3212 3213
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
L
Linus Torvalds 已提交
3214 3215 3216 3217
		goto out;
ncls:
#endif

3218
	rx_handler = rcu_dereference(skb->dev->rx_handler);
3219 3220 3221 3222 3223
	if (vlan_tx_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
3224
		if (vlan_do_receive(&skb, !rx_handler))
3225 3226 3227 3228 3229
			goto another_round;
		else if (unlikely(!skb))
			goto out;
	}

3230 3231 3232 3233 3234
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
3235 3236
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
3237
			goto out;
3238
		case RX_HANDLER_ANOTHER:
3239
			goto another_round;
3240 3241 3242 3243 3244 3245 3246
		case RX_HANDLER_EXACT:
			deliver_exact = true;
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
3247
	}
L
Linus Torvalds 已提交
3248

3249
	/* deliver only exact match when indicated */
3250
	null_or_dev = deliver_exact ? skb->dev : NULL;
3251

L
Linus Torvalds 已提交
3252
	type = skb->protocol;
3253 3254
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3255
		if (ptype->type == type &&
3256 3257
		    (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
3258
			if (pt_prev)
D
David S. Miller 已提交
3259
				ret = deliver_skb(skb, pt_prev, orig_dev);
L
Linus Torvalds 已提交
3260 3261 3262 3263 3264
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
D
David S. Miller 已提交
3265
		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
L
Linus Torvalds 已提交
3266
	} else {
3267
		atomic_long_inc(&skb->dev->rx_dropped);
L
Linus Torvalds 已提交
3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
}
T
Tom Herbert 已提交
3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296

/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
3297
	net_timestamp_check(netdev_tstamp_prequeue, skb);
E
Eric Dumazet 已提交
3298

3299 3300 3301
	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

E
Eric Dumazet 已提交
3302
#ifdef CONFIG_RPS
3303
	if (static_key_false(&rps_needed)) {
E
Eric Dumazet 已提交
3304 3305
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu, ret;
T
Tom Herbert 已提交
3306

E
Eric Dumazet 已提交
3307 3308 3309
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
T
Tom Herbert 已提交
3310

E
Eric Dumazet 已提交
3311 3312 3313
		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
3314
			return ret;
E
Eric Dumazet 已提交
3315
		}
3316
		rcu_read_unlock();
T
Tom Herbert 已提交
3317
	}
3318
#endif
3319
	return __netif_receive_skb(skb);
T
Tom Herbert 已提交
3320
}
E
Eric Dumazet 已提交
3321
EXPORT_SYMBOL(netif_receive_skb);
L
Linus Torvalds 已提交
3322

E
Eric Dumazet 已提交
3323 3324 3325
/* Network device is going away, flush any packets still pending
 * Called with irqs disabled.
 */
3326
static void flush_backlog(void *arg)
3327
{
3328
	struct net_device *dev = arg;
E
Eric Dumazet 已提交
3329
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
3330 3331
	struct sk_buff *skb, *tmp;

E
Eric Dumazet 已提交
3332
	rps_lock(sd);
3333
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3334
		if (skb->dev == dev) {
E
Eric Dumazet 已提交
3335
			__skb_unlink(skb, &sd->input_pkt_queue);
3336
			kfree_skb(skb);
3337
			input_queue_head_incr(sd);
3338
		}
3339
	}
E
Eric Dumazet 已提交
3340
	rps_unlock(sd);
3341 3342 3343 3344 3345

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
3346
			input_queue_head_incr(sd);
3347 3348
		}
	}
3349 3350
}

3351 3352 3353 3354 3355 3356 3357
static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int err = -ENOENT;

3358 3359
	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
3360
		goto out;
3361
	}
3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_complete)
			continue;

		err = ptype->gro_complete(skb);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb(skb);
}

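/* Push every skb still held on the napi GRO list up the stack and reset
 * the list.
 */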
inline void napi_gro_flush(struct napi_struct *napi)
3384 3385 3386 3387 3388 3389 3390 3391 3392
{
	struct sk_buff *skb, *next;

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		napi_gro_complete(skb);
	}

3393
	napi->gro_count = 0;
3394 3395
	napi->gro_list = NULL;
}
E
Eric Dumazet 已提交
3396
EXPORT_SYMBOL(napi_gro_flush);
3397

3398
enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3399 3400 3401 3402 3403
{
	struct sk_buff **pp = NULL;
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
H
Herbert Xu 已提交
3404
	int same_flow;
3405
	int mac_len;
3406
	enum gro_result ret;
3407

3408
	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
3409 3410
		goto normal;

3411
	if (skb_is_gso(skb) || skb_has_frag_list(skb))
3412 3413
		goto normal;

3414 3415 3416 3417 3418
	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_receive)
			continue;

3419
		skb_set_network_header(skb, skb_gro_offset(skb));
3420 3421 3422 3423
		mac_len = skb->network_header - skb->mac_header;
		skb->mac_len = mac_len;
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
H
Herbert Xu 已提交
3424
		NAPI_GRO_CB(skb)->free = 0;
3425 3426 3427 3428 3429 3430 3431 3432 3433

		pp = ptype->gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

H
Herbert Xu 已提交
3434
	same_flow = NAPI_GRO_CB(skb)->same_flow;
3435
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
H
Herbert Xu 已提交
3436

3437 3438 3439 3440 3441 3442
	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
3443
		napi->gro_count--;
3444 3445
	}

H
Herbert Xu 已提交
3446
	if (same_flow)
3447 3448
		goto ok;

3449
	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
3450 3451
		goto normal;

3452
	napi->gro_count++;
3453
	NAPI_GRO_CB(skb)->count = 1;
3454
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3455 3456
	skb->next = napi->gro_list;
	napi->gro_list = skb;
3457
	ret = GRO_HELD;
3458

3459
pull:
H
Herbert Xu 已提交
3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470
	if (skb_headlen(skb) < skb_gro_offset(skb)) {
		int grow = skb_gro_offset(skb) - skb_headlen(skb);

		BUG_ON(skb->end - skb->tail < grow);

		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

		skb->tail += grow;
		skb->data_len -= grow;

		skb_shinfo(skb)->frags[0].page_offset += grow;
E
Eric Dumazet 已提交
3471
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
H
Herbert Xu 已提交
3472

E
Eric Dumazet 已提交
3473
		if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
3474
			skb_frag_unref(skb, 0);
H
Herbert Xu 已提交
3475 3476
			memmove(skb_shinfo(skb)->frags,
				skb_shinfo(skb)->frags + 1,
				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
		}
	}

ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}
EXPORT_SYMBOL(dev_gro_receive);

static inline gro_result_t
__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;
	unsigned int maclen = skb->dev->hard_header_len;

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_gro_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_gro_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
		NAPI_GRO_CB(p)->flush = 0;
	}

	return dev_gro_receive(napi, skb);
}

gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
		break;
	}

	return ret;
}
EXPORT_SYMBOL(napi_skb_finish);

void skb_gro_reset_offset(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb->mac_header == skb->tail &&
	    !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
		NAPI_GRO_CB(skb)->frag0 =
			skb_frag_address(&skb_shinfo(skb)->frags[0]);
		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
	}
}
EXPORT_SYMBOL(skb_gro_reset_offset);

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_gro_reset_offset(skb);

	return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
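
/*
 * Illustrative only (not part of this file): a hypothetical driver's NAPI
 * poll routine would normally hand each completed receive buffer to GRO
 * like this, with "priv" being driver-private state:
 *
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	napi_gro_receive(&priv->napi, skb);
 *
 * GRO then either merges the skb into a held flow or falls back to
 * netif_receive_skb() via napi_skb_finish() above.
 */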

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	__skb_pull(skb, skb_headlen(skb));
	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
	skb->vlan_tci = 0;
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
		if (skb)
			napi->skb = skb;
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
			       gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		skb->protocol = eth_type_trans(skb, skb->dev);

		if (ret == GRO_HELD)
			skb_gro_pull(skb, -ETH_HLEN);
		else if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
		break;
	}

	return ret;
}
EXPORT_SYMBOL(napi_frags_finish);

struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	struct ethhdr *eth;
	unsigned int hlen;
	unsigned int off;

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*eth);
	eth = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		eth = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!eth)) {
			napi_reuse_skb(napi, skb);
			skb = NULL;
			goto out;
		}
	}

	skb_gro_pull(skb, sizeof(*eth));

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.  We'll fix it up properly at the end.
	 */
	skb->protocol = eth->h_proto;

out:
	return skb;
}
EXPORT_SYMBOL(napi_frags_skb);

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);
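
/*
 * Illustrative sketch (hypothetical driver, page-based receive path): the
 * napi_get_frags()/napi_gro_frags() pair is used roughly as follows, where
 * rx_page and len are placeholder names:
 *
 *	struct sk_buff *skb = napi_get_frags(&priv->napi);
 *	if (!skb)
 *		return;
 *	skb_fill_page_desc(skb, 0, rx_page, 0, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += PAGE_SIZE;
 *	napi_gro_frags(&priv->napi);
 *
 * napi_frags_skb() above then pulls the Ethernet header out of frag0.
 */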

/*
 * net_rps_action sends any pending IPIs for RPS.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPIs to kick RPS processing on remote cpus. */
		while (remsd) {
			struct softnet_data *next = remsd->rps_ipi_next;

			if (cpu_online(remsd->cpu))
				__smp_call_function_single(remsd->cpu,
							   &remsd->csd, 0);
			remsd = next;
		}
	} else
#endif
		local_irq_enable();
}

static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);

#ifdef CONFIG_RPS
	/* Check if we have pending IPIs, it's better to send them now
	 * than to wait until net_rx_action() ends.
	 */
	if (sd->rps_ipi_list) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}
#endif
	napi->weight = weight_p;
	local_irq_disable();
	while (work < quota) {
		struct sk_buff *skb;
		unsigned int qlen;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			local_irq_enable();
			__netif_receive_skb(skb);
			local_irq_disable();
			input_queue_head_incr(sd);
			if (++work >= quota) {
				local_irq_enable();
				return work;
			}
		}

		rps_lock(sd);
		qlen = skb_queue_len(&sd->input_pkt_queue);
		if (qlen)
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);

		if (qlen < quota - work) {
			/*
			 * Inline a custom version of __napi_complete().
			 * only current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
			 * we can use a plain write instead of clear_bit(),
			 * and we dont need an smp_mb() memory barrier.
			 */
			list_del(&napi->poll_list);
			napi->state = 0;

			quota = work + qlen;
		}
		rps_unlock(sd);
	}
	local_irq_enable();

	return work;
}

/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(&__get_cpu_var(softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
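
/*
 * Illustrative only: drivers normally reach __napi_schedule() through the
 * napi_schedule() wrapper from their interrupt handler, e.g. with
 * hypothetical names:
 *
 *	static irqreturn_t my_isr(int irq, void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		my_disable_rx_irqs(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 */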

void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	BUG_ON(n->gro_list);

	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);

void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	/*
	 * don't let napi dequeue from the cpu poll list
	 * just in case its running on a different cpu
	 */
	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
		return;

	napi_gro_flush(n);
	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(napi_complete);

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
EXPORT_SYMBOL(netif_napi_add);
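
/*
 * Illustrative only: a hypothetical driver registers its NAPI context once
 * at probe time and enables it from its ndo_open hook:
 *
 *	netif_napi_add(netdev, &priv->napi, my_poll, 64);
 *	...
 *	napi_enable(&priv->napi);
 *
 * A weight of 64 is the conventional choice for Ethernet drivers.
 */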

void netif_napi_del(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		kfree_skb(skb);
	}

	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);

static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	unsigned long time_limit = jiffies + 2;
	int budget = netdev_budget;
	void *have;

	local_irq_disable();

	while (!list_empty(&sd->poll_list)) {
		struct napi_struct *n;
		int work, weight;

		/* If softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which will allow
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
			goto softnet_break;

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);

		have = netpoll_poll_lock(n);

		weight = n->weight;

		/* This NAPI_STATE_SCHED test is for avoiding a race
		 * with netpoll's poll_napi().  Only the entity which
		 * obtains the lock and sees NAPI_STATE_SCHED set will
		 * actually make the ->poll() call.  Therefore we avoid
		 * accidentally calling ->poll() when NAPI is not scheduled.
		 */
		work = 0;
		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
			work = n->poll(n, weight);
			trace_napi_poll(n);
		}

		WARN_ON_ONCE(work > weight);

		budget -= work;

		local_irq_disable();

		/* Drivers must not modify the NAPI state if they
		 * consume the entire weight.  In such cases this code
		 * still "owns" the NAPI instance and therefore can
		 * move the instance around on the list at-will.
		 */
		if (unlikely(work == weight)) {
			if (unlikely(napi_disable_pending(n))) {
				local_irq_enable();
				napi_complete(n);
				local_irq_disable();
			} else
				list_move_tail(&n->poll_list, &sd->poll_list);
		}

		netpoll_poll_unlock(have);
	}
out:
	net_rps_action_and_irq_enable(sd);

#ifdef CONFIG_NET_DMA
	/*
	 * There may not be any more sk_buffs coming right now, so push
	 * any pending DMA copies to hardware
	 */
	dma_issue_pending_all();
#endif

	return;

softnet_break:
	sd->time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}

static gifconf_func_t *gifconf_list[NPROTO];

/**
 *	register_gifconf	-	register a SIOCGIF handler
 *	@family: Address family
 *	@gifconf: Function handler
 *
 *	Register protocol dependent address dumping routines. The handler
 *	that is passed must not be freed or reused until it has been replaced
 *	by another handler.
 */
int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
{
	if (family >= NPROTO)
		return -EINVAL;
	gifconf_list[family] = gifconf;
	return 0;
}
EXPORT_SYMBOL(register_gifconf);
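
/*
 * Illustrative only: an address family registers its SIOCGIFCONF helper
 * once at init time; IPv4, for instance, is believed to do
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 *
 * from net/ipv4/devinet.c.
 */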


/*
 *	Map an interface index to its name (SIOCGIFNAME)
 */

/*
 *	We need this ioctl for efficient implementation of the
 *	if_indextoname() function required by the IPv6 API.  Without
 *	it, we would have to search all the interfaces to find a
 *	match.  --pb
 */

static int dev_ifname(struct net *net, struct ifreq __user *arg)
{
	struct net_device *dev;
	struct ifreq ifr;

	/*
	 *	Fetch the caller's info block.
	 */

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(ifr.ifr_name, dev->name);
	rcu_read_unlock();

	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
		return -EFAULT;
	return 0;
}

/*
 *	Perform a SIOCGIFCONF call. This structure will change
 *	size eventually, and there is nothing I can do about it.
 *	Thus we will need a 'compatibility mode'.
 */

static int dev_ifconf(struct net *net, char __user *arg)
{
	struct ifconf ifc;
	struct net_device *dev;
	char __user *pos;
	int len;
	int total;
	int i;

	/*
	 *	Fetch the caller's info block.
	 */

	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
		return -EFAULT;

	pos = ifc.ifc_buf;
	len = ifc.ifc_len;

	/*
	 *	Loop over the interfaces, and write an info block for each.
	 */

	total = 0;
	for_each_netdev(net, dev) {
		for (i = 0; i < NPROTO; i++) {
			if (gifconf_list[i]) {
				int done;
				if (!pos)
					done = gifconf_list[i](dev, NULL, 0);
				else
					done = gifconf_list[i](dev, pos + total,
							       len - total);
				if (done < 0)
					return -EFAULT;
				total += done;
			}
		}
	}

	/*
	 *	All done.  Write the updated control block back to the caller.
	 */
	ifc.ifc_len = total;

	/*
	 * 	Both BSD and Solaris return 0 here, so we do too.
	 */
	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
}

#ifdef CONFIG_PROC_FS

#define BUCKET_SPACE (32 - NETDEV_HASHBITS)

struct dev_iter_state {
	struct seq_net_private p;
	unsigned int pos; /* bucket << BUCKET_SPACE + offset */
};

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

static inline struct net_device *dev_from_same_bucket(struct seq_file *seq)
{
	struct dev_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct net_device *dev;
	struct hlist_node *p;
	struct hlist_head *h;
	unsigned int count, bucket, offset;

	bucket = get_bucket(state->pos);
	offset = get_offset(state->pos);
	h = &net->dev_name_head[bucket];
	count = 0;
	hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
		if (count++ == offset) {
			state->pos = set_bucket_offset(bucket, count);
			return dev;
		}
	}

	return NULL;
}

static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
{
	struct dev_iter_state *state = seq->private;
	struct net_device *dev;
	unsigned int bucket;

	bucket = get_bucket(state->pos);
	do {
		dev = dev_from_same_bucket(seq);
		if (dev)
			return dev;

		bucket++;
		state->pos = set_bucket_offset(bucket, 0);
	} while (bucket < NETDEV_HASHENTRIES);

	return NULL;
}

/*
 *	This is invoked by the /proc filesystem handler to display a device
 *	in detail.
 */
void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct dev_iter_state *state = seq->private;

	rcu_read_lock();
	if (!*pos)
		return SEQ_START_TOKEN;

	/* check for end of the hash */
	if (state->pos == 0 && *pos > 1)
		return NULL;

	return dev_from_new_bucket(seq);
}

void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net_device *dev;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return dev_from_new_bucket(seq);

	dev = dev_from_same_bucket(seq);
	if (dev)
		return dev;

	return dev_from_new_bucket(seq);
}

void dev_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
		   dev->name, stats->rx_bytes, stats->rx_packets,
		   stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors +
		    stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->rx_compressed, stats->multicast,
		   stats->tx_bytes, stats->tx_packets,
		   stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors +
		    stats->tx_aborted_errors +
		    stats->tx_window_errors +
		    stats->tx_heartbeat_errors,
		   stats->tx_compressed);
}

/*
 *	Called from the PROCfs module. This now uses the new arbitrary sized
 *	/proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}

static struct softnet_data *softnet_get_online(loff_t *pos)
{
	struct softnet_data *sd = NULL;

	while (*pos < nr_cpu_ids)
		if (cpu_online(*pos)) {
			sd = &per_cpu(softnet_data, *pos);
			break;
		} else
			++*pos;
	return sd;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct softnet_data *sd = v;

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		   sd->processed, sd->dropped, sd->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   sd->cpu_collision, sd->received_rps);
	return 0;
}

static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
};

static int dev_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &dev_seq_ops,
			    sizeof(struct dev_iter_state));
}

int dev_seq_open_ops(struct inode *inode, struct file *file,
		     const struct seq_operations *ops)
{
	return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
}

static const struct file_operations dev_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = dev_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

static const struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
};

static int softnet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &softnet_seq_ops);
}

static const struct file_operations softnet_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = softnet_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static void *ptype_get_idx(loff_t pos)
{
	struct packet_type *pt = NULL;
	loff_t i = 0;
	int t;

	list_for_each_entry_rcu(pt, &ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(0);

	pt = v;
	nxt = pt->list.next;
	if (pt->type == htons(ETH_P_ALL)) {
		if (nxt != &ptype_all)
			goto found;
		hash = 0;
		nxt = ptype_base[0].next;
	} else
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;

	while (nxt == &ptype_base[hash]) {
		if (++hash >= PTYPE_HASH_SIZE)
			return NULL;
		nxt = ptype_base[hash].next;
	}
found:
	return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct packet_type *pt = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Type Device      Function\n");
	else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

		seq_printf(seq, " %-8s %pF\n",
			   pt->dev ? pt->dev->name : "", pt->func);
	}

	return 0;
}

static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next  = ptype_seq_next,
	.stop  = ptype_seq_stop,
	.show  = ptype_seq_show,
};

static int ptype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ptype_seq_ops,
			sizeof(struct seq_net_private));
}

static const struct file_operations ptype_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = ptype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};


static int __net_init dev_proc_net_init(struct net *net)
{
	int rc = -ENOMEM;

	if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
		goto out;
	if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
		goto out_dev;
	if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
		goto out_softnet;

	if (wext_proc_init(net))
		goto out_ptype;
	rc = 0;
out:
	return rc;
out_ptype:
	proc_net_remove(net, "ptype");
out_softnet:
	proc_net_remove(net, "softnet_stat");
out_dev:
	proc_net_remove(net, "dev");
	goto out;
}

static void __net_exit dev_proc_net_exit(struct net *net)
{
	wext_proc_exit(net);

	proc_net_remove(net, "ptype");
	proc_net_remove(net, "softnet_stat");
	proc_net_remove(net, "dev");
}

static struct pernet_operations __net_initdata dev_proc_ops = {
	.init = dev_proc_net_init,
	.exit = dev_proc_net_exit,
};

static int __init dev_proc_init(void)
{
	return register_pernet_subsys(&dev_proc_ops);
}
#else
#define dev_proc_init() 0
#endif	/* CONFIG_PROC_FS */


/**
 *	netdev_set_master	-	set up master pointer
 *	@slave: slave device
 *	@master: new master device
 *
 *	Changes the master device of the slave. Pass %NULL to break the
 *	bonding. The caller must hold the RTNL semaphore. On a failure
 *	a negative errno code is returned. On success the reference counts
 *	are adjusted and the function returns zero.
 */
int netdev_set_master(struct net_device *slave, struct net_device *master)
{
	struct net_device *old = slave->master;

	ASSERT_RTNL();

	if (master) {
		if (old)
			return -EBUSY;
		dev_hold(master);
	}

	slave->master = master;

	if (old)
		dev_put(old);
	return 0;
}
EXPORT_SYMBOL(netdev_set_master);

/**
 *	netdev_set_bond_master	-	set up bonding master/slave pair
 *	@slave: slave device
 *	@master: new master device
 *
 *	Changes the master device of the slave. Pass %NULL to break the
 *	bonding. The caller must hold the RTNL semaphore. On a failure
 *	a negative errno code is returned. On success %RTM_NEWLINK is sent
 *	to the routing socket and the function returns zero.
 */
int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
{
	int err;

	ASSERT_RTNL();

	err = netdev_set_master(slave, master);
	if (err)
		return err;
	if (master)
		slave->flags |= IFF_SLAVE;
	else
		slave->flags &= ~IFF_SLAVE;

	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
	return 0;
}
EXPORT_SYMBOL(netdev_set_bond_master);
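
/*
 * Illustrative only (hypothetical bonding-style caller, under RTNL):
 *
 *	err = netdev_set_bond_master(slave_dev, bond_dev);
 *	if (err)
 *		goto err_undo;
 *	...
 *	netdev_set_bond_master(slave_dev, NULL);
 *
 * The NULL call breaks the pairing again when the slave is released.
 */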

static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	uid_t uid;
	gid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
4491 4492
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
4493 4494 4495
			return -EOVERFLOW;
		}
	}
4496
	if (dev->flags != old_flags) {
4497 4498 4499
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
4500 4501
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
4502 4503 4504 4505 4506 4507
			audit_log(current->audit_context, GFP_ATOMIC,
				AUDIT_ANOM_PROMISCUOUS,
				"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				dev->name, (dev->flags & IFF_PROMISC),
				(old_flags & IFF_PROMISC),
				audit_get_loginuid(current),
4508
				uid, gid,
4509
				audit_get_sessionid(current));
4510
		}
4511

4512
		dev_change_rx_flags(dev, IFF_PROMISC);
L
Linus Torvalds 已提交
4513
	}
4514
	return 0;
L
Linus Torvalds 已提交
4515 4516
}

4517 4518 4519 4520 4521 4522 4523 4524 4525
/**
 *	dev_set_promiscuity	- update promiscuity count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove promiscuity from a device. While the count in the device
 *	remains above zero the interface remains promiscuous. Once it hits zero
 *	the device reverts back to normal filtering operation. A negative inc
 *	value is used to drop promiscuity on the device.
4526
 *	Return 0 if successful or a negative errno code on error.
4527
 */
4528
int dev_set_promiscuity(struct net_device *dev, int inc)
4529
{
4530
	unsigned int old_flags = dev->flags;
4531
	int err;
4532

4533
	err = __dev_set_promiscuity(dev, inc);
4534
	if (err < 0)
4535
		return err;
4536 4537
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
4538
	return err;
4539
}
E
Eric Dumazet 已提交
4540
EXPORT_SYMBOL(dev_set_promiscuity);
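
/*
 * Illustrative only: a component that must see all traffic on a port
 * (a bridge-like user, say) bumps the count under RTNL and drops it on
 * teardown:
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);
 *	rtnl_unlock();
 *	...
 *	dev_set_promiscuity(dev, -1);
 */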
4541

L
Linus Torvalds 已提交
4542 4543 4544 4545 4546 4547 4548 4549 4550 4551
/**
 *	dev_set_allmulti	- update allmulti count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove reception of all multicast frames to a device. While the
 *	count in the device remains above zero the interface remains listening
 *	to all interfaces. Once it hits zero the device reverts back to normal
 *	filtering operation. A negative @inc value is used to drop the counter
 *	when releasing a resource needing all multicasts.
4552
 *	Return 0 if successful or a negative errno code on error.
L
Linus Torvalds 已提交
4553 4554
 */

4555
int dev_set_allmulti(struct net_device *dev, int inc)
L
Linus Torvalds 已提交
4556
{
4557
	unsigned int old_flags = dev->flags;
L
Linus Torvalds 已提交
4558

4559 4560
	ASSERT_RTNL();

L
Linus Torvalds 已提交
4561
	dev->flags |= IFF_ALLMULTI;
4562 4563 4564 4565 4566 4567 4568 4569 4570 4571
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
4572 4573
			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
				dev->name);
4574 4575 4576
			return -EOVERFLOW;
		}
	}
4577
	if (dev->flags ^ old_flags) {
4578
		dev_change_rx_flags(dev, IFF_ALLMULTI);
4579
		dev_set_rx_mode(dev);
4580
	}
4581
	return 0;
4582
}
E
Eric Dumazet 已提交
4583
EXPORT_SYMBOL(dev_set_allmulti);
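
/*
 * Illustrative only: stacked devices (VLAN/macvlan style) typically push
 * their allmulti requirement down to the lower device, under RTNL:
 *
 *	if (dev->flags & IFF_ALLMULTI)
 *		dev_set_allmulti(lowerdev, 1);
 *
 * and pass -1 when the upper device goes away.
 */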
4584 4585 4586 4587

/*
 *	Upload unicast and multicast address lists to device and
 *	configure RX filtering. When the device doesn't support unicast
J
Joe Perches 已提交
4588
 *	filtering it is put in promiscuous mode while unicast addresses
4589 4590 4591 4592
 *	are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
4593 4594
	const struct net_device_ops *ops = dev->netdev_ops;

4595 4596 4597 4598 4599
	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags&IFF_UP))
		return;

	if (!netif_device_present(dev))
4600
		return;
4601

4602
	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4603 4604 4605
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
4606
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4607
			__dev_set_promiscuity(dev, 1);
4608
			dev->uc_promisc = true;
4609
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4610
			__dev_set_promiscuity(dev, -1);
4611
			dev->uc_promisc = false;
4612 4613
		}
	}
4614 4615 4616

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
4617 4618 4619 4620
}

void dev_set_rx_mode(struct net_device *dev)
{
4621
	netif_addr_lock_bh(dev);
4622
	__dev_set_rx_mode(dev);
4623
	netif_addr_unlock_bh(dev);
L
Linus Torvalds 已提交
4624 4625
}

4626 4627 4628 4629 4630 4631
/**
 *	dev_get_flags - get flags reported to userspace
 *	@dev: device
 *
 *	Get the combination of flag bits exported through APIs to userspace.
 */
L
Linus Torvalds 已提交
4632 4633 4634 4635 4636 4637
unsigned dev_get_flags(const struct net_device *dev)
{
	unsigned flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
S
Stefan Rompf 已提交
4638 4639 4640
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
L
Linus Torvalds 已提交
4641 4642 4643
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

S
Stefan Rompf 已提交
4644 4645 4646 4647 4648 4649 4650 4651
	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}
L
Linus Torvalds 已提交
4652 4653 4654

	return flags;
}
E
Eric Dumazet 已提交
4655
EXPORT_SYMBOL(dev_get_flags);
L
Linus Torvalds 已提交
4656

4657
int __dev_change_flags(struct net_device *dev, unsigned int flags)
L
Linus Torvalds 已提交
4658
{
4659
	unsigned int old_flags = dev->flags;
4660
	int ret;
L
Linus Torvalds 已提交
4661

4662 4663
	ASSERT_RTNL();

L
Linus Torvalds 已提交
4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677
	/*
	 *	Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 *	Load in the correct multicast list now the flags have changed.
	 */

4678 4679
	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);
4680

4681
	dev_set_rx_mode(dev);
L
Linus Torvalds 已提交
4682 4683 4684 4685 4686 4687 4688 4689 4690

	/*
	 *	Have we downed the interface. We handle IFF_UP ourselves
	 *	according to user attempts to set it, rather than blindly
	 *	setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different  ? */
4691
		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
L
Linus Torvalds 已提交
4692 4693

		if (!ret)
4694
			dev_set_rx_mode(dev);
L
Linus Torvalds 已提交
4695 4696 4697
	}

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
E
Eric Dumazet 已提交
4698 4699
		int inc = (flags & IFF_PROMISC) ? 1 : -1;

L
Linus Torvalds 已提交
4700 4701 4702 4703 4704 4705 4706 4707 4708
		dev->gflags ^= IFF_PROMISC;
		dev_set_promiscuity(dev, inc);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	   is important. Some (broken) drivers set IFF_PROMISC, when
	   IFF_ALLMULTI is requested not asking us and not reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
E
Eric Dumazet 已提交
4709 4710
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

L
Linus Torvalds 已提交
4711 4712 4713 4714
		dev->gflags ^= IFF_ALLMULTI;
		dev_set_allmulti(dev, inc);
	}

4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741
	return ret;
}

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
{
	unsigned int changes = dev->flags ^ old_flags;

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
		else
			call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
}

/**
 *	dev_change_flags - change device settings
 *	@dev: device
 *	@flags: device state flags
 *
 *	Change settings on device based state flags. The flags are
 *	in the userspace exported format.
 */
4742
int dev_change_flags(struct net_device *dev, unsigned int flags)
4743
{
4744 4745
	int ret;
	unsigned int changes, old_flags = dev->flags;
4746 4747 4748 4749 4750 4751

	ret = __dev_change_flags(dev, flags);
	if (ret < 0)
		return ret;

	changes = old_flags ^ dev->flags;
4752 4753
	if (changes)
		rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
L
Linus Torvalds 已提交
4754

4755
	__dev_notify_flags(dev, old_flags);
L
Linus Torvalds 已提交
4756 4757
	return ret;
}
E
Eric Dumazet 已提交
4758
EXPORT_SYMBOL(dev_change_flags);
L
Linus Torvalds 已提交
4759

4760 4761 4762 4763 4764 4765 4766
/**
 *	dev_set_mtu - Change maximum transfer unit
 *	@dev: device
 *	@new_mtu: new transfer unit
 *
 *	Change the maximum transfer size of the network device.
 */
L
Linus Torvalds 已提交
4767 4768
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
4769
	const struct net_device_ops *ops = dev->netdev_ops;
L
Linus Torvalds 已提交
4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782
	int err;

	if (new_mtu == dev->mtu)
		return 0;

	/*	MTU must be positive.	 */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = 0;
4783 4784
	if (ops->ndo_change_mtu)
		err = ops->ndo_change_mtu(dev, new_mtu);
L
Linus Torvalds 已提交
4785 4786
	else
		dev->mtu = new_mtu;
4787

L
Linus Torvalds 已提交
4788
	if (!err && dev->flags & IFF_UP)
4789
		call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
L
Linus Torvalds 已提交
4790 4791
	return err;
}
E
Eric Dumazet 已提交
4792
EXPORT_SYMBOL(dev_set_mtu);
L
Linus Torvalds 已提交
4793

4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804
/**
 *	dev_set_group - Change group this device belongs to
 *	@dev: device
 *	@new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);

4805 4806 4807 4808 4809 4810 4811
/**
 *	dev_set_mac_address - Change Media Access Control Address
 *	@dev: device
 *	@sa: new address
 *
 *	Change the hardware (MAC) address of the device
 */
L
Linus Torvalds 已提交
4812 4813
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
4814
	const struct net_device_ops *ops = dev->netdev_ops;
L
Linus Torvalds 已提交
4815 4816
	int err;

4817
	if (!ops->ndo_set_mac_address)
L
Linus Torvalds 已提交
4818 4819 4820 4821 4822
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
4823
	err = ops->ndo_set_mac_address(dev, sa);
L
Linus Torvalds 已提交
4824
	if (!err)
4825
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
L
Linus Torvalds 已提交
4826 4827
	return err;
}
E
Eric Dumazet 已提交
4828
EXPORT_SYMBOL(dev_set_mac_address);
L
Linus Torvalds 已提交
4829 4830

/*
E
Eric Dumazet 已提交
4831
 *	Perform the SIOCxIFxxx calls, inside rcu_read_lock()
L
Linus Torvalds 已提交
4832
 */
4833
static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
L
Linus Torvalds 已提交
4834 4835
{
	int err;
E
Eric Dumazet 已提交
4836
	struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
L
Linus Torvalds 已提交
4837 4838 4839 4840 4841

	if (!dev)
		return -ENODEV;

	switch (cmd) {
E
Eric Dumazet 已提交
4842 4843 4844
	case SIOCGIFFLAGS:	/* Get interface flags */
		ifr->ifr_flags = (short) dev_get_flags(dev);
		return 0;
L
Linus Torvalds 已提交
4845

E
Eric Dumazet 已提交
4846 4847 4848 4849
	case SIOCGIFMETRIC:	/* Get the metric on the interface
				   (currently unused) */
		ifr->ifr_metric = 0;
		return 0;
L
Linus Torvalds 已提交
4850

E
Eric Dumazet 已提交
4851 4852 4853
	case SIOCGIFMTU:	/* Get the MTU of a device */
		ifr->ifr_mtu = dev->mtu;
		return 0;
L
Linus Torvalds 已提交
4854

E
Eric Dumazet 已提交
4855 4856 4857 4858 4859 4860 4861 4862
	case SIOCGIFHWADDR:
		if (!dev->addr_len)
			memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
		else
			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
		ifr->ifr_hwaddr.sa_family = dev->type;
		return 0;
L
Linus Torvalds 已提交
4863

E
Eric Dumazet 已提交
4864 4865 4866
	case SIOCGIFSLAVE:
		err = -EINVAL;
		break;
4867

E
Eric Dumazet 已提交
4868 4869 4870 4871 4872 4873 4874 4875
	case SIOCGIFMAP:
		ifr->ifr_map.mem_start = dev->mem_start;
		ifr->ifr_map.mem_end   = dev->mem_end;
		ifr->ifr_map.base_addr = dev->base_addr;
		ifr->ifr_map.irq       = dev->irq;
		ifr->ifr_map.dma       = dev->dma;
		ifr->ifr_map.port      = dev->if_port;
		return 0;
4876

E
Eric Dumazet 已提交
4877 4878 4879
	case SIOCGIFINDEX:
		ifr->ifr_ifindex = dev->ifindex;
		return 0;
4880

E
Eric Dumazet 已提交
4881 4882 4883
	case SIOCGIFTXQLEN:
		ifr->ifr_qlen = dev->tx_queue_len;
		return 0;
4884

E
Eric Dumazet 已提交
4885 4886 4887 4888 4889
	default:
		/* dev_ioctl() should ensure this case
		 * is never reached
		 */
		WARN_ON(1);
4890
		err = -ENOTTY;
E
Eric Dumazet 已提交
4891
		break;
4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903

	}
	return err;
}

/*
 *	Perform the SIOCxIFxxx calls, inside rtnl_lock()
 */
static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
J
Jarek Poplawski 已提交
4904
	const struct net_device_ops *ops;
4905 4906 4907 4908

	if (!dev)
		return -ENODEV;

J
Jarek Poplawski 已提交
4909 4910
	ops = dev->netdev_ops;

4911
	switch (cmd) {
E
Eric Dumazet 已提交
4912 4913
	case SIOCSIFFLAGS:	/* Set interface flags */
		return dev_change_flags(dev, ifr->ifr_flags);
4914

E
Eric Dumazet 已提交
4915 4916 4917
	case SIOCSIFMETRIC:	/* Set the metric on the interface
				   (currently unused) */
		return -EOPNOTSUPP;
4918

E
Eric Dumazet 已提交
4919 4920
	case SIOCSIFMTU:	/* Set the MTU of a device */
		return dev_set_mtu(dev, ifr->ifr_mtu);
L
Linus Torvalds 已提交
4921

E
Eric Dumazet 已提交
4922 4923
	case SIOCSIFHWADDR:
		return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
L
Linus Torvalds 已提交
4924

E
Eric Dumazet 已提交
4925 4926 4927 4928 4929 4930 4931
	case SIOCSIFHWBROADCAST:
		if (ifr->ifr_hwaddr.sa_family != dev->type)
			return -EINVAL;
		memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
		       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
		return 0;
L
Linus Torvalds 已提交
4932

E
Eric Dumazet 已提交
4933 4934
	case SIOCSIFMAP:
		if (ops->ndo_set_config) {
L
Linus Torvalds 已提交
4935 4936
			if (!netif_device_present(dev))
				return -ENODEV;
E
Eric Dumazet 已提交
4937 4938 4939
			return ops->ndo_set_config(dev, &ifr->ifr_map);
		}
		return -EOPNOTSUPP;
L
Linus Torvalds 已提交
4940

E
Eric Dumazet 已提交
4941
	case SIOCADDMULTI:
4942
		if (!ops->ndo_set_rx_mode ||
E
Eric Dumazet 已提交
4943 4944 4945 4946
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
4947
		return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
E
Eric Dumazet 已提交
4948 4949

	case SIOCDELMULTI:
4950
		if (!ops->ndo_set_rx_mode ||
E
Eric Dumazet 已提交
4951 4952 4953 4954
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
4955
		return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
L
Linus Torvalds 已提交
4956

E
Eric Dumazet 已提交
4957 4958 4959 4960 4961
	case SIOCSIFTXQLEN:
		if (ifr->ifr_qlen < 0)
			return -EINVAL;
		dev->tx_queue_len = ifr->ifr_qlen;
		return 0;
L
Linus Torvalds 已提交
4962

E
Eric Dumazet 已提交
4963 4964 4965
	case SIOCSIFNAME:
		ifr->ifr_newname[IFNAMSIZ-1] = '\0';
		return dev_change_name(dev, ifr->ifr_newname);
L
Linus Torvalds 已提交
4966

4967 4968 4969 4970 4971 4972
	case SIOCSHWTSTAMP:
		err = net_hwtstamp_validate(ifr);
		if (err)
			return err;
		/* fall through */

E
Eric Dumazet 已提交
4973 4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000
	/*
	 *	Unknown or private ioctl
	 */
	default:
		if ((cmd >= SIOCDEVPRIVATE &&
		    cmd <= SIOCDEVPRIVATE + 15) ||
		    cmd == SIOCBONDENSLAVE ||
		    cmd == SIOCBONDRELEASE ||
		    cmd == SIOCBONDSETHWADDR ||
		    cmd == SIOCBONDSLAVEINFOQUERY ||
		    cmd == SIOCBONDINFOQUERY ||
		    cmd == SIOCBONDCHANGEACTIVE ||
		    cmd == SIOCGMIIPHY ||
		    cmd == SIOCGMIIREG ||
		    cmd == SIOCSMIIREG ||
		    cmd == SIOCBRADDIF ||
		    cmd == SIOCBRDELIF ||
		    cmd == SIOCSHWTSTAMP ||
		    cmd == SIOCWANDEV) {
			err = -EOPNOTSUPP;
			if (ops->ndo_do_ioctl) {
				if (netif_device_present(dev))
					err = ops->ndo_do_ioctl(dev, ifr, cmd);
				else
					err = -ENODEV;
			}
		} else
			err = -EINVAL;
L
Linus Torvalds 已提交
5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012

	}
	return err;
}

/*
 *	This function handles all "interface"-type I/O control requests. The actual
 *	'doing' part of this is dev_ifsioc above.
 */

/**
 *	dev_ioctl	-	network device ioctl
5013
 *	@net: the applicable net namespace
L
Linus Torvalds 已提交
5014 5015 5016 5017 5018 5019 5020 5021 5022
 *	@cmd: command to issue
 *	@arg: pointer to a struct ifreq in user space
 *
 *	Issue ioctl functions to devices. This is normally called by the
 *	user space syscall interfaces but can sometimes be useful for
 *	other purposes. The return value is the return from the syscall if
 *	positive or a negative errno code on error.
 */

5023
int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
L
Linus Torvalds 已提交
5024 5025 5026 5027 5028 5029 5030 5031 5032 5033 5034
{
	struct ifreq ifr;
	int ret;
	char *colon;

	/* One special case: SIOCGIFCONF takes ifconf argument
	   and requires shared lock, because it sleeps writing
	   to user space.
	 */

	if (cmd == SIOCGIFCONF) {
5035
		rtnl_lock();
5036
		ret = dev_ifconf(net, (char __user *) arg);
5037
		rtnl_unlock();
L
Linus Torvalds 已提交
5038 5039 5040
		return ret;
	}
	if (cmd == SIOCGIFNAME)
5041
		return dev_ifname(net, (struct ifreq __user *)arg);
L
Linus Torvalds 已提交
5042 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052 5053 5054 5055 5056

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	ifr.ifr_name[IFNAMSIZ-1] = 0;

	colon = strchr(ifr.ifr_name, ':');
	if (colon)
		*colon = 0;

	/*
	 *	See which interface the caller is talking about.
	 */

	switch (cmd) {
E
Eric Dumazet 已提交
5057 5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 5070 5071
	/*
	 *	These ioctl calls:
	 *	- can be done by all.
	 *	- atomic and do not require locking.
	 *	- return a value
	 */
	case SIOCGIFFLAGS:
	case SIOCGIFMETRIC:
	case SIOCGIFMTU:
	case SIOCGIFHWADDR:
	case SIOCGIFSLAVE:
	case SIOCGIFMAP:
	case SIOCGIFINDEX:
	case SIOCGIFTXQLEN:
		dev_load(net, ifr.ifr_name);
E
Eric Dumazet 已提交
5072
		rcu_read_lock();
E
Eric Dumazet 已提交
5073
		ret = dev_ifsioc_locked(net, &ifr, cmd);
E
Eric Dumazet 已提交
5074
		rcu_read_unlock();
E
Eric Dumazet 已提交
5075 5076 5077 5078 5079 5080 5081 5082
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;
L
Linus Torvalds 已提交
5083

E
Eric Dumazet 已提交
5084 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096
	case SIOCETHTOOL:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ethtool(net, &ifr);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;
L
Linus Torvalds 已提交
5097

E
Eric Dumazet 已提交
5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113 5114 5115 5116 5117 5118 5119 5120
	/*
	 *	These ioctl calls:
	 *	- require superuser power.
	 *	- require strict serialization.
	 *	- return a value
	 */
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSIFNAME:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;
L
Linus Torvalds 已提交
5121

E
Eric Dumazet 已提交
5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163
	/*
	 *	These ioctl calls:
	 *	- require superuser power.
	 *	- require strict serialization.
	 *	- do not return a value
	 */
	case SIOCSIFFLAGS:
	case SIOCSIFMETRIC:
	case SIOCSIFMTU:
	case SIOCSIFMAP:
	case SIOCSIFHWADDR:
	case SIOCSIFSLAVE:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFHWBROADCAST:
	case SIOCSIFTXQLEN:
	case SIOCSMIIREG:
	case SIOCBONDENSLAVE:
	case SIOCBONDRELEASE:
	case SIOCBONDSETHWADDR:
	case SIOCBONDCHANGEACTIVE:
	case SIOCBRADDIF:
	case SIOCBRDELIF:
	case SIOCSHWTSTAMP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* fall through */
	case SIOCBONDSLAVEINFOQUERY:
	case SIOCBONDINFOQUERY:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		return ret;

	case SIOCGIFMEM:
		/* Get the per device memory space. We can add this but
		 * currently do not support it */
	case SIOCSIFMEM:
		/* Set the per device memory buffer space.
		 * Not applicable in our case */
	case SIOCSIFLINK:
5164
		return -ENOTTY;
E
Eric Dumazet 已提交
5165 5166 5167 5168 5169 5170 5171 5172

	/*
	 *	Unknown or private ioctl.
	 */
	default:
		if (cmd == SIOCWANDEV ||
		    (cmd >= SIOCDEVPRIVATE &&
		     cmd <= SIOCDEVPRIVATE + 15)) {
5173
			dev_load(net, ifr.ifr_name);
L
Linus Torvalds 已提交
5174
			rtnl_lock();
5175
			ret = dev_ifsioc(net, &ifr, cmd);
L
Linus Torvalds 已提交
5176
			rtnl_unlock();
E
Eric Dumazet 已提交
5177 5178 5179
			if (!ret && copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
				ret = -EFAULT;
L
Linus Torvalds 已提交
5180
			return ret;
E
Eric Dumazet 已提交
5181 5182 5183 5184
		}
		/* Take care of Wireless Extensions */
		if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
			return wext_handle_ioctl(net, &ifr, cmd, arg);
5185
		return -ENOTTY;
L
Linus Torvalds 已提交
5186 5187 5188 5189 5190 5191
	}
}
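
/*
 * Illustrative only: the matching user-space side of the ioctls handled
 * above looks like this (error handling omitted):
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(fd, SIOCGIFMTU, &ifr);
 *
 * after which ifr.ifr_mtu holds the device MTU.
 */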


/**
 *	dev_new_index	-	allocate an ifindex
5192
 *	@net: the applicable net namespace
L
Linus Torvalds 已提交
5193 5194 5195 5196 5197
 *
 *	Returns a suitable unique value for a new device interface
 *	number.  The caller must hold the rtnl semaphore or the
 *	dev_base_lock to be sure it remains unique.
 */
5198
static int dev_new_index(struct net *net)
L
Linus Torvalds 已提交
5199 5200 5201 5202 5203
{
	static int ifindex;
	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
5204
		if (!__dev_get_by_index(net, ifindex))
L
Linus Torvalds 已提交
5205 5206 5207 5208 5209
			return ifindex;
	}
}

/* Delayed registration/unregisteration */
5210
static LIST_HEAD(net_todo_list);
L
Linus Torvalds 已提交
5211

5212
static void net_set_todo(struct net_device *dev)
L
Linus Torvalds 已提交
5213 5214 5215 5216
{
	list_add_tail(&dev->todo_list, &net_todo_list);
}

5217
static void rollback_registered_many(struct list_head *head)
5218
{
5219
	struct net_device *dev, *tmp;
5220

5221 5222 5223
	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

5224
	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5225
		/* Some devices call without registering
5226 5227
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
5228 5229
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
5230 5231
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);
5232

5233
			WARN_ON(1);
5234 5235
			list_del(&dev->unreg_list);
			continue;
5236
		}
5237
		dev->dismantle = true;
5238
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
5239
	}
5240

5241 5242
	/* If device is running, close it first. */
	dev_close_many(head);
5243

5244
	list_for_each_entry(dev, head, unreg_list) {
5245 5246
		/* And unlink it from device chain. */
		unlist_netdevice(dev);
5247

5248 5249
		dev->reg_state = NETREG_UNREGISTERING;
	}
5250 5251 5252

	synchronize_net();

5253 5254 5255
	list_for_each_entry(dev, head, unreg_list) {
		/* Shutdown queueing discipline. */
		dev_shutdown(dev);
5256 5257


5258 5259 5260 5261
		/* Notify protocols, that we are about to destroy
		   this device. They should clean all the things.
		*/
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5262

5263 5264 5265 5266
		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);

5267 5268 5269
		/*
		 *	Flush the unicast and multicast chains
		 */
5270
		dev_uc_flush(dev);
5271
		dev_mc_flush(dev);
5272

5273 5274
		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);
5275

5276 5277
		/* Notifier chain MUST detach us from master device. */
		WARN_ON(dev->master);
5278

5279 5280 5281
		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
	}
5282

5283
	/* Process any work delayed until the end of the batch */
5284
	dev = list_first_entry(head, struct net_device, unreg_list);
5285
	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
5286

5287
	synchronize_net();
5288

5289
	list_for_each_entry(dev, head, unreg_list)
5290 5291 5292 5293 5294 5295 5296 5297 5298
		dev_put(dev);
}

static void rollback_registered(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
E
Eric Dumazet 已提交
5299
	list_del(&single);
5300 5301
}

static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* Fix illegal checksum combinations */
	if ((features & NETIF_F_HW_CSUM) &&
	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	/* Fix illegal SG+CSUM combinations. */
	if ((features & NETIF_F_SG) &&
	    !(features & NETIF_F_ALL_CSUM)) {
		netdev_dbg(dev,
			"Dropping NETIF_F_SG since no checksum feature.\n");
		features &= ~NETIF_F_SG;
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
		features &= ~NETIF_F_ALL_TSO;
	}

	/* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
		features &= ~NETIF_F_GSO;
	}

	/* UFO needs SG and checksumming */
	if (features & NETIF_F_UFO) {
		/* maybe split UFO into V4 and V6? */
		if (!((features & NETIF_F_GEN_CSUM) ||
		    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
			    == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
			netdev_dbg(dev,
				"Dropping NETIF_F_UFO since no checksum offload features.\n");
			features &= ~NETIF_F_UFO;
		}

		if (!(features & NETIF_F_SG)) {
			netdev_dbg(dev,
				"Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
			features &= ~NETIF_F_UFO;
		}
	}

	return features;
}

int __netdev_update_features(struct net_device *dev)
{
	netdev_features_t features;
	int err = 0;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	if (dev->features == features)
		return 0;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		&dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);

	if (unlikely(err < 0)) {
		netdev_err(dev,
			"set_features() failed (%d); wanted %pNF, left %pNF\n",
			err, &features, &dev->features);
		return -1;
	}

	if (!err)
		dev->features = features;

	return 1;
}

/**
 *	netdev_update_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications if it
 *	has changed. Should be called after driver or hardware dependent
 *	conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);
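/* Usage sketch (illustrative only, not part of this file): a driver that
 * discovers at runtime that an offload has become unusable clears it from
 * dev->hw_features and asks the core to recompute dev->features.  The
 * "my_netdev" pointer below is a hypothetical driver device.
 *
 *	rtnl_lock();
 *	my_netdev->hw_features &= ~NETIF_F_TSO;
 *	netdev_update_features(my_netdev);
 *	rtnl_unlock();
 *
 * __netdev_update_features() asserts that the RTNL is held, so callers
 * must take rtnl_lock() unless they already run under it.
 */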

/**
 *	netdev_change_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications even
 *	if they have not changed. Should be called instead of
 *	netdev_update_features() if also dev->vlan_features might
 *	have changed to allow the changes to be propagated to stacked
 *	VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);

/**
 *	netif_stacked_transfer_operstate -	transfer operstate
 *	@rootdev: the root or lower level device to transfer state from
 *	@dev: the device to transfer operstate to
 *
 *	Transfer operational state from root to device. This is normally
 *	called when a stacking relationship exists between the root
 *	device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
					struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (netif_carrier_ok(rootdev)) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	}
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
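/* Usage sketch (illustrative only): a stacking driver (VLAN- or
 * macvlan-style) mirrors the lower device's state from its netdevice
 * notifier.  "upper" and "lower" are hypothetical net_device pointers.
 *
 *	case NETDEV_CHANGE:
 *	case NETDEV_UP:
 *	case NETDEV_DOWN:
 *		netif_stacked_transfer_operstate(lower, upper);
 *		break;
 */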

#ifdef CONFIG_RPS
static int netif_alloc_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;
	struct netdev_rx_queue *rx;

	BUG_ON(count < 1);

	rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
	if (!rx) {
		pr_err("netdev: Unable to allocate %u rx queues\n", count);
		return -ENOMEM;
	}
	dev->_rx = rx;

	for (i = 0; i < count; i++)
		rx[i].dev = dev;
	return 0;
}
#endif

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{
	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
	queue->xmit_lock_owner = -1;
	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
	queue->dev = dev;
#ifdef CONFIG_BQL
	dql_init(&queue->dql, HZ);
#endif
}

static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;

	BUG_ON(count < 1);

	tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
	if (!tx) {
		pr_err("netdev: Unable to allocate %u tx queues\n", count);
		return -ENOMEM;
	}
	dev->_tx = tx;

	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);

	return 0;
}

/**
 *	register_netdevice	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	register_netdev() instead of this.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */

int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);

	dev->iflink = -1;

	ret = dev_get_valid_name(dev, dev->name);
	if (ret < 0)
		goto out;

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	dev->ifindex = dev_new_index(net);
	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;

	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
	dev->hw_features |= NETIF_F_SOFT_FEATURES;
	dev->features |= NETIF_F_SOFT_FEATURES;
	dev->wanted_features = dev->features & dev->hw_features;

	/* Turn on no cache copy if HW is doing checksum */
	if (!(dev->flags & IFF_LOOPBACK)) {
		dev->hw_features |= NETIF_F_NOCACHE_COPY;
		if (dev->features & NETIF_F_ALL_CSUM) {
			dev->wanted_features |= NETIF_F_NOCACHE_COPY;
			dev->features |= NETIF_F_NOCACHE_COPY;
		}
	}

	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
	 */
	dev->vlan_features |= NETIF_F_HIGHDMA;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;

	__netdev_update_features(dev);

	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		dev->reg_state = NETREG_UNREGISTERED;
	}
	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);

/**
 *	init_dummy_netdev	- init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	This takes a network device structure and initializes the minimum
 *	amount of fields so it can be used to schedule NAPI polls without
 *	registering a full blown interface. This is to be used by drivers
 *	that need to tie several hardware interfaces to a single NAPI
 *	poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* Note : We don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
	 * its refcount.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
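/* Usage sketch (illustrative only): a driver with one PCI function but
 * several receive contexts can hang all of its NAPI instances off a single
 * dummy netdev.  The names "my_adapter" and "my_poll" are hypothetical.
 *
 *	init_dummy_netdev(&my_adapter->napi_dev);
 *	netif_napi_add(&my_adapter->napi_dev, &my_adapter->napi,
 *		       my_poll, 64);
 *	napi_enable(&my_adapter->napi);
 *
 * The dummy netdev is never registered, so it must not be passed to
 * register_netdev()/unregister_netdev().
 */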


/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
 *	and expands the device name if you passed a format string to
 *	alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
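/* Usage sketch (illustrative only): the usual driver probe/remove pairing
 * around register_netdev().  "my_priv" and "my_setup" are hypothetical.
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "eth%d", my_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */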

int netdev_refcnt_read(const struct net_device *dev)
{
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);

/*
 * netdev_wait_allrefs - wait until all references are gone.
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;
	int refcnt;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	refcnt = netdev_refcnt_read(dev);

	while (refcnt != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
			/* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
			 * should have already handled it the first time */

			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		refcnt = netdev_refcnt_read(dev);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
				 dev->name, refcnt);
			warning_time = jiffies;
		}
	}
}

/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	/* Wait for rcu callbacks to finish before attempting to drain
	 * the device list.  This usually avoids a 250ms wait.
	 */
	if (!list_empty(&list))
		rcu_barrier();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			pr_err("network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		on_each_cpu(flush_backlog, dev, 1);

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev));
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
		WARN_ON(dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}

/* Convert net_device_stats to rtnl_link_stats64.  They have the same
 * fields in the same order, with only the type differing.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*stats64));
#else
	size_t i, n = sizeof(*stats64) / sizeof(u64);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
		     sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);

/**
 *	dev_get_stats	- get network device statistics
 *	@dev: device to get statistics from
 *	@storage: place to store stats
 *
 *	Get network statistics from device. Return @storage.
 *	The device driver may provide its own method by setting
 *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 *	otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);
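/* Usage sketch (illustrative only): a driver that keeps 64-bit counters
 * implements ndo_get_stats64 and lets dev_get_stats() fall back to
 * dev->stats otherwise.  "my_priv" is a hypothetical private struct.
 *
 *	static struct rtnl_link_stats64 *
 *	my_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *s)
 *	{
 *		struct my_priv *p = netdev_priv(dev);
 *
 *		s->rx_packets = p->rx_packets;
 *		s->tx_packets = p->tx_packets;
 *		return s;
 *	}
 *
 * dev_get_stats() zeroes *storage before calling ndo_get_stats64, so the
 * driver only needs to fill in the fields it actually maintains.
 */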

struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	queue->qdisc = &noop_qdisc;
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}

/**
 *	alloc_netdev_mqs - allocate network device
 *	@sizeof_priv:	size of private data to allocate space for
 *	@name:		device name format string
 *	@setup:		callback to initialize device
 *	@txqs:		the number of TX subqueues to allocate
 *	@rxqs:		the number of RX subqueues to allocate
 *
 *	Allocates a struct net_device with private data area for driver use
 *	and performs basic initialization.  Also allocates subqueue structs
 *	for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

#ifdef CONFIG_RPS
	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}
#endif

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		pr_err("alloc_netdev: Unable to allocate device\n");
		return NULL;
	}

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_p;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->gso_max_size = GSO_MAX_SIZE;

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

#ifdef CONFIG_RPS
	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;
#endif

	strcpy(dev->name, name);
	dev->group = INIT_NETDEV_GROUP;
	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
	kfree(dev->_tx);
#ifdef CONFIG_RPS
	kfree(dev->_rx);
#endif

free_p:
	kfree(p);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);

/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released.
 *	If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	kfree(dev->_tx);
#ifdef CONFIG_RPS
	kfree(dev->_rx);
#endif

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	/*  Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

/**
 *	synchronize_net -  Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);

/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If head not NULL, device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore.  You may want
 *	unregister_netdev() instead of this.
 */

void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);

L
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
6115
 *	from the kernel tables.
L
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
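/* Usage sketch (illustrative only): batched teardown through the
 * _queue/_many API lets several devices share the rollback work and the
 * synchronize_net() calls when a driver removes many interfaces at once.
 * "my_devs" and "n" are hypothetical.
 *
 *	LIST_HEAD(kill_list);
 *
 *	rtnl_lock();
 *	for (i = 0; i < n; i++)
 *		unregister_netdevice_queue(my_devs[i], &kill_list);
 *	unregister_netdevice_many(&kill_list);
 *	rtnl_unlock();
 *
 * Each device still needs free_netdev() once the unregister has completed
 * (typically from its destructor or the caller).
 */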

/**
 *	dev_change_net_namespace - move device to a different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */

int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	err = -EINVAL;
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(dev, pat) < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.

	   Note that dev->reg_state stays at NETREG_REGISTERED.
	   This is wanted because this way 8021q and macvlan know
	   the device is just moving and can keep their slaves up.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);

static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU. */
	if (!list_empty(&oldsd->poll_list)) {
		list_splice_init(&oldsd->poll_list, &sd->poll_list);
		raise_softirq_irqoff(NET_RX_SOFTIRQ);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}


/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_GEN_CSUM)
		mask |= NETIF_F_ALL_CSUM;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_GEN_CSUM)
		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
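/* Usage sketch (illustrative only): an aggregating driver (bonding-style
 * master) can fold each slave's feature set into a combined one.  The
 * initial value and mask below are hypothetical placeholders.
 *
 *	netdev_features_t features = MY_MASTER_FEATURES;
 *
 *	list_for_each_entry(slave, &priv->slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     MY_MASTER_FEATURES);
 *
 * NETIF_F_ONE_FOR_ALL bits end up set if any slave has them, while
 * NETIF_F_ALL_FOR_ALL bits survive only if every slave has them.
 */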

static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

int __netdev_printk(const char *level, const struct net_device *dev,
			   struct va_format *vaf)
{
	int r;

	if (dev && dev->dev.parent)
		r = dev_printk(level, dev->dev.parent, "%s: %pV",
			       netdev_name(dev), vaf);
	else if (dev)
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
	else
		r = printk("%s(NULL net_device): %pV", level, vaf);

	return r;
}
EXPORT_SYMBOL(__netdev_printk);

int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = __netdev_printk(level, dev, &vaf);
	va_end(args);

	return r;
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
int func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	int r;							\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	r = __netdev_printk(level, dev, &vaf);			\
	va_end(args);						\
								\
	return r;						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);

6445
static void __net_exit netdev_exit(struct net *net)
6446 6447 6448 6449 6450
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

6451
static struct pernet_operations __net_initdata netdev_net_ops = {
6452 6453 6454 6455
	.init = netdev_init,
	.exit = netdev_exit,
};

6456
static void __net_exit default_device_exit(struct net *net)
6457
{
6458
	struct net_device *dev, *aux;
6459
	/*
6460
	 * Push all migratable network devices back to the
6461 6462 6463
	 * initial network namespace
	 */
	rtnl_lock();
6464
	for_each_netdev_safe(net, dev, aux) {
6465
		int err;
6466
		char fb_name[IFNAMSIZ];
6467 6468 6469 6470 6471

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

6472 6473 6474
		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;
6475

L
Lucas De Marchi 已提交
6476
		/* Push remaining network devices to init_net */
6477 6478
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
6479
		if (err) {
6480 6481
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
6482
			BUG();
6483 6484 6485 6486 6487
		}
	}
	rtnl_unlock();
}

6488 6489 6490
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices most be removed from a network
6491
	 * namespace.  Do this in the reverse order of registration.
6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
E
Eric Dumazet 已提交
6509
	list_del(&dev_kill_list);
6510 6511 6512
	rtnl_unlock();
}

6513
static struct pernet_operations __net_initdata default_device_ops = {
6514
	.exit = default_device_exit,
6515
	.exit_batch = default_device_exit_batch,
6516 6517
};

/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *       This is called single threaded during boot, so no need
 *       to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		memset(sd, 0, sizeof(*sd));
		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		sd->completion_queue = NULL;
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->csd.flags = 0;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		sd->backlog.gro_list = NULL;
		sd->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free the
	 * loopback device, maintain this invariant by keeping the
	 * loopback device the first device on the list of network
	 * devices.  Ensure the loopback device is the first device that
	 * appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);