/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 * 		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *              			to 2 if register_netdev gets called
 *              			before net_dev_init & also removed a
 *              			few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	: 	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	: 	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki   :	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *              Pekka Riikonen  :	Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *              			indefinitely on dev->refcnt
 * 		J Hadi Salim	:	- Backlog queue sampling
 *				        - netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/net_tstamp.h>
#include <linux/static_key.h>
#include <net/flow_keys.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *             sure which should go first, but I bet it won't make much
 *             difference if we are running VLANs.  The good news is that
 *             this protocol won't be in the list unless compiled in, so
 *             the average user (w/out VLANs) will not be adversely affected.
 *             --BLG
 *
 *		0800	IP
 *		8100    802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);

	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot
 *	guarantee that all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *      The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
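
/*
 * Illustrative usage sketch (not part of dev.c): how a protocol module
 * might register and later remove a handler with dev_add_pack() and
 * dev_remove_pack().  The handler name and ethertype below are made up.
 */
#if 0
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* Consume the packet; a real handler would parse it first. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_packet_type __read_mostly = {
	.type = cpu_to_be16(0x88b5),	/* local experimental ethertype */
	.func = example_rcv,
};

static int __init example_proto_init(void)
{
	dev_add_pack(&example_packet_type);
	return 0;
}

static void __exit example_proto_exit(void)
{
	/* Sleeps until no CPU is still walking the old handler list. */
	dev_remove_pack(&example_packet_type);
}
#endif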

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 * 	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq 	= s[i].map.irq;
			dev->base_addr 	= s[i].map.base_addr;
			dev->mem_start 	= s[i].map.mem_start;
			dev->mem_end 	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 * 	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 * 	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
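
/*
 * Usage sketch (illustrative, not part of dev.c): the reference taken by
 * dev_get_by_name() must be released with dev_put().  The "eth0" name and
 * the use of init_net are just example parameters.
 */
#if 0
static int example_find_eth0(void)
{
	struct net_device *dev = dev_get_by_name(&init_net, "eth0");

	if (!dev)
		return -ENODEV;
	/* ... use dev here ... */
	dev_put(dev);
	return 0;
}
#endif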

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
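
/*
 * Usage sketch (illustrative): dev_getbyhwaddr_rcu() takes no reference,
 * so both the lookup and any use of the result must stay inside a single
 * rcu_read_lock() section (or be done under RTNL).
 */
#if 0
static bool example_hwaddr_in_use(struct net *net, const char *ha)
{
	struct net_device *dev;
	bool found;

	rcu_read_lock();
	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, ha);
	found = dev != NULL;
	rcu_read_unlock();

	return found;
}
#endif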

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
				    unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/*  avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
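
/*
 * Usage sketch (illustrative): with a "%d" format the lowest free unit
 * number is chosen, so "eth%d" may become "eth0", "eth1", ...  The caller
 * below is hypothetical and must hold the dev_base or rtnl lock as
 * documented above.
 */
#if 0
static int example_assign_name(struct net_device *dev)
{
	int unit = dev_alloc_name(dev, "eth%d");

	if (unit < 0)
		return unit;	/* -EINVAL or -ENFILE */
	/* dev->name now holds e.g. "eth0" */
	return 0;
}
#endif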

static int dev_get_valid_name(struct net_device *dev, const char *name)
{
	struct net *net;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name(dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(dev, newname);
	if (err < 0)
		return err;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return ret;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device,
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * 	netdev_notify_peers - notify network peers about existence of @dev
 * 	@dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;
	int no_module;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	no_module = !dev;
	if (no_module && capable(CAP_NET_ADMIN))
		no_module = request_module("netdev-%s", name);
	if (no_module && capable(CAP_SYS_MODULE)) {
		if (!request_module("%s", name))
			pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
				name);
	}
}
EXPORT_SYMBOL(dev_load);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

L
		return -ENODEV;

1170 1171 1172 1173 1174
	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

L
1176

1177 1178
	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);
1179

1180 1181
	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);
L
1183 1184 1185
	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
L
1187
		net_dmaengine_get();
1188
		dev_set_rx_mode(dev);
L
1190
		add_device_randomness(dev->dev_addr, dev->addr_len);
L
1192

L
}

/**
1197 1198
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
L
1200 1201 1202 1203 1204 1205 1206
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
L
1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
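
/*
 * Usage sketch (illustrative): dev_open() and dev_close() expect RTNL to
 * be held by the caller.
 */
#if 0
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev);
	rtnl_unlock();

	return err;
}
#endif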

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, unreg_list) {
		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch the poll list,
		 * it can even be on a different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_clear_bit(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);
	return retval;
}

E
1281 1282 1283
{
	struct net_device *dev, *tmp;
	LIST_HEAD(tmp_list);
L
1285 1286 1287 1288 1289
	list_for_each_entry_safe(dev, tmp, head, unreg_list)
		if (!(dev->flags & IFF_UP))
			list_move(&dev->unreg_list, &tmp_list);

	__dev_close_many(head);
L
1291 1292 1293 1294
	list_for_each_entry(dev, head, unreg_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
	}
1295

1296 1297
	/* rollback_registered_many needs the complete original list */
	list_splice(&tmp_list, head);
1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311
	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
1312 1313
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);
L
1315 1316 1317 1318
		list_add(&dev->unreg_list, &single);
		dev_close_many(&single);
		list_del(&single);
	}
L
}
E
L

1324 1325 1326 1327 1328 1329 1330 1331 1332 1333
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	/*
	 * If we're trying to disable lro on a vlan device
	 * use the underlying physical device instead
	 */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 * 	When registered all registration and up events are replayed
 *	to the new notifier so that it has a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
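
/*
 * Usage sketch (illustrative, not part of dev.c): a module watching UP and
 * DOWN events.  Existing devices are replayed to the notifier on
 * registration, as described above.  In this kernel the notifier argument
 * is the struct net_device pointer itself.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_UP:
		pr_info("%s is up\n", dev->name);
		break;
	case NETDEV_DOWN:
		pr_info("%s is down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};

/* registered with register_netdevice_notifier(&example_netdev_notifier) */
#endif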

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 * 	After unregistering, unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *      @val: value passed unmodified to notifier function
 *      @dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	WARN_ON(in_interrupt());
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)			\
	if (static_key_false(&netstamp_needed)) {		\
		if ((COND) && !(SKB)->tstamp.tv64)	\
			__net_timestamp(SKB);		\
	}						\

static int net_hwtstamp_validate(struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	enum hwtstamp_tx_types tx_type;
	enum hwtstamp_rx_filters rx_filter;
	int tx_type_valid = 0;
	int rx_filter_valid = 0;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	tx_type = cfg.tx_type;
	rx_filter = cfg.rx_filter;

	switch (tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_ONESTEP_SYNC:
		tx_type_valid = 1;
		break;
	}

	switch (rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		rx_filter_valid = 1;
		break;
	}

	if (!tx_type_valid || !rx_filter_valid)
		return -ERANGE;

	return 0;
}

static inline bool is_skb_forwardable(struct net_device *dev,
				      struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
1604
 *	NET_RX_DROP     (packet was dropped, but freed)
1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	skb_orphan(skb);
	nf_reset(skb);

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb->skb_iif = 0;
	skb->dev = dev;
	skb_dst_drop(skb);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
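
/*
 * Usage sketch (illustrative): a veth-like driver handing a packet from
 * its transmit path to the receive queue of a peer device.  The way the
 * peer pointer is stored in netdev_priv() is hypothetical.
 */
#if 0
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *peer = *(struct net_device **)netdev_priv(dev);

	if (dev_forward_skb(peer, skb) == NET_RX_SUCCESS)
		dev->stats.tx_packets++;
	else
		dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}
#endif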

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (ptype->af_packet_priv == NULL)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (!skb_loop_sk(ptype, skb))) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
1705
			    skb2->network_header > skb2->tail) {
1706 1707 1708
				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
						     ntohs(skb2->protocol),
						     dev->name);
1709
				skb_reset_network_header(skb2);
L
Linus Torvalds 已提交
1710 1711
			}

1712
			skb2->transport_header = skb2->network_header;
L
Linus Torvalds 已提交
1713
			skb2->pkt_type = PACKET_OUTGOING;
1714
			pt_prev = ptype;
L
Linus Torvalds 已提交
1715 1716
		}
	}
1717 1718
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
L
Linus Torvalds 已提交
1719 1720 1721
	rcu_read_unlock();
}

/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not, null the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid and nothing can be done, so disable priority mappings.
 * It is expected that drivers will fix this mapping if they can
 * before calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues)
			qdisc_reset_all_tx_gt(dev, txq);
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);

#ifdef CONFIG_RPS
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif
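
/* Illustrative usage sketch (not part of the original file): a multiqueue
 * driver typically sizes its queues once it knows how many hardware rings
 * it obtained; 'my_set_ring_count' and 'nring' are hypothetical. Once the
 * device is registered, both calls must be made under rtnl_lock().
 *
 *	static int my_set_ring_count(struct net_device *dev, unsigned int nring)
 *	{
 *		int err;
 *
 *		err = netif_set_real_num_tx_queues(dev, nring);
 *		if (err)
 *			return err;
 *		return netif_set_real_num_rx_queues(dev, nring);
 *	}
 */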

/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
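
/* Illustrative usage sketch (not part of the original file): drivers commonly
 * clamp their requested ring count with this helper before allocating MSI-X
 * vectors, e.g.
 *
 *	nring = min_t(unsigned int, hw_max_rings,
 *		      netif_get_num_default_rss_queues());
 *
 * where 'hw_max_rings' stands for a hypothetical hardware limit.
 */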

1837
static inline void __netif_reschedule(struct Qdisc *q)
1838
{
1839 1840
	struct softnet_data *sd;
	unsigned long flags;
1841

1842 1843
	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
1844 1845 1846
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
1847 1848 1849 1850 1851 1852 1853 1854
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
1855 1856 1857
}
EXPORT_SYMBOL(__netif_schedule);

1858
void dev_kfree_skb_irq(struct sk_buff *skb)
1859
{
1860
	if (atomic_dec_and_test(&skb->users)) {
1861 1862
		struct softnet_data *sd;
		unsigned long flags;
1863

1864 1865 1866 1867 1868 1869 1870
		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
1871
}
1872
EXPORT_SYMBOL(dev_kfree_skb_irq);
1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
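
/* Illustrative usage sketch (not part of the original file): a driver pairs
 * these helpers in its suspend/resume path. The 'my_*' names are hypothetical.
 *
 *	static int my_suspend(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_detach(dev);
 *		my_hw_power_down(dev);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		my_hw_power_up(dev);
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */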

1915 1916
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
1917
	static const netdev_features_t null_features = 0;
1918 1919 1920 1921 1922 1923 1924 1925
	struct net_device *dev = skb->dev;
	const char *driver = "";

	if (dev && dev->dev.parent)
		driver = dev_driver_string(dev->dev.parent);

	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
1926 1927
	     driver, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
1928 1929 1930 1931
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);

1971 1972 1973
/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
1974
 *	@features: features for the output path (see dev->features)
1975 1976
 *
 *	This function segments the given skb and returns a list of segments.
1977 1978 1979
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
1980
 */
1981 1982
struct sk_buff *skb_gso_segment(struct sk_buff *skb,
	netdev_features_t features)
1983 1984 1985
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
1987
	int vlan_depth = ETH_HLEN;
1988
	int err;
1989

1990 1991
	while (type == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vh;
1992

1993
		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
1994 1995
			return ERR_PTR(-EINVAL);

1996 1997 1998
		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
		type = vh->h_vlan_encapsulated_proto;
		vlan_depth += VLAN_HLEN;
1999 2000
	}

2001
	skb_reset_mac_header(skb);
2002
	skb->mac_len = skb->network_header - skb->mac_header;
2003 2004
	__skb_pull(skb, skb->mac_len);

2005
	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2006
		skb_warn_bad_offload(skb);
2007

2008 2009 2010 2011 2012
		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

2013
	rcu_read_lock();
2014 2015
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2016
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
2017
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2018 2019 2020 2021
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
2022 2023
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
2024
			}
2025
			segs = ptype->gso_segment(skb, features);
2026 2027 2028 2029 2030
			break;
		}
	}
	rcu_read_unlock();

2031
	__skb_push(skb, skb->data - skb_mac_header(skb));
2032

2033 2034 2035 2036
	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);

2037 2038 2039 2040 2041
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
2042
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2043 2044 2045 2046 2047 2048
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
2056
#ifdef CONFIG_HIGHMEM
	int i;
2058
	if (!(dev->features & NETIF_F_HIGHDMA)) {
2059 2060 2061
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			if (PageHighMem(skb_frag_page(frag)))
2062
				return 1;
2063
		}
2064
	}

2066 2067
	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
2071
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2072 2073
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2074 2075 2076 2077
			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
2078
#endif
	return 0;
}

2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107
struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
2108
 *	@features: device features as applicable to this skb
2109 2110 2111 2112
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
2113
static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2114 2115
{
	struct sk_buff *segs;
2116 2117 2118 2119 2120 2121

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;
2122

2123
	if (IS_ERR(segs))
2124 2125 2126 2127 2128 2129 2130 2131 2132
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

2133
static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
2134 2135 2136 2137 2138 2139 2140 2141 2142 2143
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_V4_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_V6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

2144 2145
static netdev_features_t harmonize_features(struct sk_buff *skb,
	__be16 protocol, netdev_features_t features)
2146
{
2147
	if (!can_checksum_protocol(features, protocol)) {
2148 2149 2150 2151 2152 2153 2154 2155 2156
		features &= ~NETIF_F_ALL_CSUM;
		features &= ~NETIF_F_SG;
	} else if (illegal_highdma(skb->dev, skb)) {
		features &= ~NETIF_F_SG;
	}

	return features;
}

2157
netdev_features_t netif_skb_features(struct sk_buff *skb)
2158 2159
{
	__be16 protocol = skb->protocol;
2160
	netdev_features_t features = skb->dev->features;
2161

2162 2163 2164
	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
		features &= ~NETIF_F_GSO_MASK;

2165 2166 2167
	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
2168 2169 2170
	} else if (!vlan_tx_tag_present(skb)) {
		return harmonize_features(skb, protocol, features);
	}
2171

2172
	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
2173 2174 2175 2176 2177

	if (protocol != htons(ETH_P_8021Q)) {
		return harmonize_features(skb, protocol, features);
	} else {
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
2178
				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
2179 2180
		return harmonize_features(skb, protocol, features);
	}
2181
}
2182
EXPORT_SYMBOL(netif_skb_features);
2183

2184 2185 2186
/*
 * Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
2187
 *	2. skb is fragmented and the device does not support SG.
2188 2189
 */
static inline int skb_needs_linearize(struct sk_buff *skb,
2190
				      int features)
2191
{
2192 2193 2194
	return skb_is_nonlinear(skb) &&
			((skb_has_frag_list(skb) &&
				!(features & NETIF_F_FRAGLIST)) ||
2195
			(skb_shinfo(skb)->nr_frags &&
2196
				!(features & NETIF_F_SG)));
2197 2198
}

2199 2200
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
2201
{
2202
	const struct net_device_ops *ops = dev->netdev_ops;
2203
	int rc = NETDEV_TX_OK;
2204
	unsigned int skb_len;
2205

2206
	if (likely(!skb->next)) {
2207
		netdev_features_t features;
2208

		/*
		 * If device doesn't need skb->dst, release it right now while
		 * it's hot in this cpu cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(skb);

2216 2217
		features = netif_skb_features(skb);

2218
		if (vlan_tx_tag_present(skb) &&
2219
		    !(features & NETIF_F_HW_VLAN_TX)) {
2220 2221 2222 2223 2224 2225 2226
			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
			if (unlikely(!skb))
				goto out;

			skb->vlan_tci = 0;
		}

2227
		if (netif_needs_gso(skb, features)) {
2228
			if (unlikely(dev_gso_segment(skb, features)))
2229 2230 2231
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
2232
		} else {
2233
			if (skb_needs_linearize(skb, features) &&
2234 2235 2236 2237 2238 2239 2240 2241
			    __skb_linearize(skb))
				goto out_kfree_skb;

			/* If packet is not checksummed and device does not
			 * support checksumming for this protocol, complete
			 * checksumming here.
			 */
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
2242 2243
				skb_set_transport_header(skb,
					skb_checksum_start_offset(skb));
2244
				if (!(features & NETIF_F_ALL_CSUM) &&
2245 2246 2247
				     skb_checksum_help(skb))
					goto out_kfree_skb;
			}
2248 2249
		}

2250 2251 2252
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

2253
		skb_len = skb->len;
2254
		rc = ops->ndo_start_xmit(skb, dev);
2255
		trace_net_dev_xmit(skb, rc, dev, skb_len);
2256
		if (rc == NETDEV_TX_OK)
			txq_trans_update(txq);
2258
		return rc;
2259 2260
	}

2261
gso:
2262 2263 2264 2265 2266
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;

		/*
		 * If device doesn't need nskb->dst, release it right now while
		 * it's hot in this cpu cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(nskb);

2275 2276 2277
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(nskb, dev);

2278
		skb_len = nskb->len;
2279
		rc = ops->ndo_start_xmit(nskb, dev);
2280
		trace_net_dev_xmit(nskb, rc, dev, skb_len);
2281
		if (unlikely(rc != NETDEV_TX_OK)) {
2282 2283
			if (rc & ~NETDEV_TX_MASK)
				goto out_kfree_gso_skb;
2284
			nskb->next = skb->next;
2285 2286 2287
			skb->next = nskb;
			return rc;
		}
		txq_trans_update(txq);
2289
		if (unlikely(netif_xmit_stopped(txq) && skb->next))
2290
			return NETDEV_TX_BUSY;
2291
	} while (skb->next);
2292

2293 2294 2295
out_kfree_gso_skb:
	if (likely(skb->next == NULL))
		skb->destructor = DEV_GSO_CB(skb)->destructor;
2296 2297
out_kfree_skb:
	kfree_skb(skb);
2298
out:
2299
	return rc;
2300 2301
}

static u32 hashrnd __read_mostly;

/*
 * Returns a Tx hash based on the given packet descriptor and a Tx queues'
 * number to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
		  unsigned int num_tx_queues)
2310
{
2311
	u32 hash;
2312 2313
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

2315 2316
	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
2317 2318
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
2319 2320
		return hash;
	}
2321

2322 2323 2324 2325 2326 2327
	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

2328
	if (skb->sk && skb->sk->sk_hash)
2329
		hash = skb->sk->sk_hash;
2330
	else
		hash = (__force u16) skb->protocol;
	hash = jhash_1word(hash, hashrnd);

2334
	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
2335
}
2336
EXPORT_SYMBOL(__skb_tx_hash);
2337

2338 2339 2340
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2341 2342 2343
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
				     dev->name, queue_index,
				     dev->real_num_tx_queues);
2344 2345 2346 2347 2348
		return 0;
	}
	return queue_index;
}

static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else {
				u32 hash;
				if (skb->sk && skb->sk->sk_hash)
					hash = skb->sk->sk_hash;
				else
					hash = (__force u16) skb->protocol ^
					    skb->rxhash;
				hash = jhash_1word(hash, hashrnd);
				queue_index = map->queues[
				    ((u64)hash * map->len) >> 32];
			}
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}

2387 2388 2389
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
2390
	int queue_index;
2391
	const struct net_device_ops *ops = dev->netdev_ops;
2392

2393 2394 2395
	if (dev->real_num_tx_queues == 1)
		queue_index = 0;
	else if (ops->ndo_select_queue) {
2396 2397 2398 2399 2400
		queue_index = ops->ndo_select_queue(dev, skb);
		queue_index = dev_cap_txqueue(dev, queue_index);
	} else {
		struct sock *sk = skb->sk;
		queue_index = sk_tx_queue_get(sk);
2401

2402 2403 2404
		if (queue_index < 0 || skb->ooo_okay ||
		    queue_index >= dev->real_num_tx_queues) {
			int old_index = queue_index;
2405

			queue_index = get_xps_queue(dev, skb);
			if (queue_index < 0)
				queue_index = skb_tx_hash(dev, skb);
2409 2410 2411 2412

			if (queue_index != old_index && sk) {
				struct dst_entry *dst =
				    rcu_dereference_check(sk->sk_dst_cache, 1);

				if (dst && skb_dst(skb) == dst)
					sk_tx_queue_set(sk, queue_index);
			}
2417 2418
		}
	}
2419

2420 2421
	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
2422 2423
}

2424 2425 2426 2427 2428
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended;
2430 2431
	int rc;

	qdisc_skb_cb(skb)->pkt_len = skb->len;
	qdisc_calculate_pkt_len(skb, q);
2434 2435 2436 2437 2438 2439
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
	 * and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
2441 2442 2443
	if (unlikely(contended))
		spin_lock(&q->busylock);

2444 2445 2446 2447 2448
	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2449
		   qdisc_run_begin(q)) {
2450 2451 2452 2453 2454
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
			skb_dst_force(skb);
2457 2458 2459

		qdisc_bstats_update(q, skb);

2460 2461 2462 2463 2464
		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
2465
			__qdisc_run(q);
2466
		} else
2467
			qdisc_run_end(q);
2468 2469 2470

		rc = NET_XMIT_SUCCESS;
	} else {
		skb_dst_force(skb);
		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2473 2474 2475 2476 2477 2478 2479
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
2480 2481
	}
	spin_unlock(root_lock);
2482 2483
	if (unlikely(contended))
		spin_unlock(&q->busylock);
2484 2485 2486
	return rc;
}

2487 2488 2489
#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
static void skb_update_prio(struct sk_buff *skb)
{
	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2491

2492 2493 2494 2495 2496 2497
	if (!skb->priority && skb->sk && map) {
		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;

		if (prioidx < map->priomap_len)
			skb->priority = map->priomap[prioidx];
	}
2498 2499 2500 2501 2502
}
#else
#define skb_update_prio(skb)
#endif

2503
static DEFINE_PER_CPU(int, xmit_recursion);
2504
#define RECURSION_LIMIT 10
2505

2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522
/**
 *	dev_loopback_xmit - loop back @skb
 *	@skb: buffer to transmit
 */
int dev_loopback_xmit(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);

2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547
/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
2551
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

2555 2556
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
2558
	rcu_read_lock_bh();

2560 2561
	skb_update_prio(skb);

2562
	txq = dev_pick_tx(dev, skb);
2563
	q = rcu_dereference_bh(txq->qdisc);
2564

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
2568
	trace_net_dev_queue(skb);
	if (q->enqueue) {
2570
		rc = __dev_xmit_skb(skb, q, dev, txq);
2571
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible that they rely on protection
	   made by us here.

	   Check this and shoot the lock. It is not prone to deadlocks.
	   Either shoot the noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

2589
		if (txq->xmit_lock_owner != cpu) {

2591 2592 2593
			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
				goto recursion_alert;

2594
			HARD_TX_LOCK(dev, txq, cpu);

2596
			if (!netif_xmit_stopped(txq)) {
2597
				__this_cpu_inc(xmit_recursion);
2598
				rc = dev_hard_start_xmit(skb, dev, txq);
2599
				__this_cpu_dec(xmit_recursion);
2600
				if (dev_xmit_complete(rc)) {
2601
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
2605
			HARD_TX_UNLOCK(dev, txq);
2606 2607
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
2610 2611 2612
			 * unfortunately
			 */
recursion_alert:
2613 2614
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
2619
	rcu_read_unlock_bh();

	kfree_skb(skb);
	return rc;
out:
2624
	rcu_read_unlock_bh();
	return rc;
}
EXPORT_SYMBOL(dev_queue_xmit);
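
/* Illustrative usage sketch (not part of the original file): a protocol or
 * tunnel that has already resolved the output device and built the headers
 * submits the frame like this; 'my_send_frame' is a hypothetical helper.
 *
 *	static int my_send_frame(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		skb->dev = dev;
 *		return dev_queue_xmit(skb);
 *	}
 *
 * As documented above, the skb is consumed even on error and interrupts must
 * be enabled by the caller.
 */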


/*=======================================================================
			Receiver routines
  =======================================================================*/

2634
int netdev_max_backlog __read_mostly = 1000;
E
Eric Dumazet 已提交
2635
int netdev_tstamp_prequeue __read_mostly = 1;
2636 2637
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */
L
Linus Torvalds 已提交
2638

E
Eric Dumazet 已提交
2639 2640 2641 2642 2643 2644 2645 2646
/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

T
Tom Herbert 已提交
2647
/*
2648
 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2649 2650 2651
 * and src/dst port numbers.  Sets rxhash in skb to non-zero hash value
 * on success, zero indicates no valid hash.  Also, sets l4_rxhash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
T
Tom Herbert 已提交
2652
 */
2653
void __skb_get_rxhash(struct sk_buff *skb)
T
Tom Herbert 已提交
2654
{
2655 2656
	struct flow_keys keys;
	u32 hash;
2657

2658 2659
	if (!skb_flow_dissect(skb, &keys))
		return;
2660

2661
	if (keys.ports)
2662
		skb->l4_rxhash = 1;
T
Tom Herbert 已提交
2663

E
Eric Dumazet 已提交
2664
	/* get a consistent hash (same value on both flow directions) */
2665 2666 2667
	if (((__force u32)keys.dst < (__force u32)keys.src) ||
	    (((__force u32)keys.dst == (__force u32)keys.src) &&
	     ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
2668
		swap(keys.dst, keys.src);
2669 2670
		swap(keys.port16[0], keys.port16[1]);
	}
T
Tom Herbert 已提交
2671

2672 2673 2674
	hash = jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src,
			    (__force u32)keys.ports, hashrnd);
2675 2676 2677
	if (!hash)
		hash = 1;

2678
	skb->rxhash = hash;
2679 2680 2681 2682 2683 2684
}
EXPORT_SYMBOL(__skb_get_rxhash);

#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
E
Eric Dumazet 已提交
2685
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
2686 2687
EXPORT_SYMBOL(rps_sock_flow_table);

2688
struct static_key rps_needed __read_mostly;
2689

2690 2691 2692 2693
static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
2694
	if (next_cpu != RPS_NO_CPU) {
2695 2696 2697 2698 2699 2700 2701 2702 2703
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
2704 2705
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb->rxhash & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
2728
			per_cpu(softnet_data, next_cpu).input_queue_head;
2729 2730
	}

2731
	rflow->cpu = next_cpu;
2732 2733 2734
	return rflow;
}

2735 2736 2737 2738 2739 2740 2741 2742 2743
/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	struct netdev_rx_queue *rxqueue;
E
Eric Dumazet 已提交
2744
	struct rps_map *map;
2745 2746 2747 2748 2749 2750 2751
	struct rps_dev_flow_table *flow_table;
	struct rps_sock_flow_table *sock_flow_table;
	int cpu = -1;
	u16 tcpu;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);
2752 2753 2754 2755 2756
		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
2757 2758 2759 2760 2761 2762
			goto done;
		}
		rxqueue = dev->_rx + index;
	} else
		rxqueue = dev->_rx;

E
Eric Dumazet 已提交
2763 2764
	map = rcu_dereference(rxqueue->rps_map);
	if (map) {
2765
		if (map->len == 1 &&
2766
		    !rcu_access_pointer(rxqueue->rps_flow_table)) {
2767 2768 2769 2770 2771
			tcpu = map->cpus[0];
			if (cpu_online(tcpu))
				cpu = tcpu;
			goto done;
		}
2772
	} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
2773
		goto done;
2774
	}
2775

2776
	skb_reset_network_header(skb);
2777 2778 2779
	if (!skb_get_rxhash(skb))
		goto done;

T
Tom Herbert 已提交
2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		u16 next_cpu;
		struct rps_dev_flow *rflow;

		rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
		tcpu = rflow->cpu;

		next_cpu = sock_flow_table->ents[skb->rxhash &
		    sock_flow_table->mask];

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (equal to RPS_NO_CPU).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2806 2807 2808
		      rflow->last_qtail)) >= 0))
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);

T
Tom Herbert 已提交
2809 2810 2811 2812 2813 2814 2815
		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

T
Tom Herbert 已提交
2816
	if (map) {
T
Tom Herbert 已提交
2817
		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
T
Tom Herbert 已提交
2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828

		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}

2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868
#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = ACCESS_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);
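
/* Illustrative usage sketch (not part of the original file): a driver that
 * implements ndo_rx_flow_steer() might age out its hardware filters from a
 * periodic work item. The 'adapter'/'my_filter' layout is hypothetical.
 *
 *	for (i = 0; i < adapter->nfilters; i++) {
 *		struct my_filter *f = &adapter->filters[i];
 *
 *		if (f->in_use &&
 *		    rps_may_expire_flow(adapter->netdev, f->rxq_index,
 *					f->flow_id, i))
 *			my_remove_hw_filter(adapter, f);
 *	}
 */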

#endif /* CONFIG_RFS_ACCEL */

T
Tom Herbert 已提交
2869
/* Called from hardirq (IPI) context */
E
Eric Dumazet 已提交
2870
static void rps_trigger_softirq(void *data)
T
Tom Herbert 已提交
2871
{
E
Eric Dumazet 已提交
2872 2873
	struct softnet_data *sd = data;

E
Eric Dumazet 已提交
2874
	____napi_schedule(sd, &sd->backlog);
C
Changli Gao 已提交
2875
	sd->received_rps++;
T
Tom Herbert 已提交
2876
}
E
Eric Dumazet 已提交
2877

T
Tom Herbert 已提交
2878
#endif /* CONFIG_RPS */
T
Tom Herbert 已提交
2879

E
Eric Dumazet 已提交
2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900
/*
 * Check if this softnet_data structure is another cpu one
 * If yes, queue it to our IPI list and return 1
 * If no, return 0
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = &__get_cpu_var(softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}

T
Tom Herbert 已提交
2901 2902 2903 2904
/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
T
Tom Herbert 已提交
2905 2906
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
T
Tom Herbert 已提交
2907
{
E
Eric Dumazet 已提交
2908
	struct softnet_data *sd;
T
Tom Herbert 已提交
2909 2910
	unsigned long flags;

E
Eric Dumazet 已提交
2911
	sd = &per_cpu(softnet_data, cpu);
T
Tom Herbert 已提交
2912 2913 2914

	local_irq_save(flags);

E
Eric Dumazet 已提交
2915
	rps_lock(sd);
2916 2917
	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
		if (skb_queue_len(&sd->input_pkt_queue)) {
T
Tom Herbert 已提交
2918
enqueue:
E
Eric Dumazet 已提交
2919
			__skb_queue_tail(&sd->input_pkt_queue, skb);
2920
			input_queue_tail_incr_save(sd, qtail);
E
Eric Dumazet 已提交
2921
			rps_unlock(sd);
2922
			local_irq_restore(flags);
T
Tom Herbert 已提交
2923 2924 2925
			return NET_RX_SUCCESS;
		}

2926 2927 2928 2929
		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
E
Eric Dumazet 已提交
2930
			if (!rps_ipi_queued(sd))
E
Eric Dumazet 已提交
2931
				____napi_schedule(sd, &sd->backlog);
T
Tom Herbert 已提交
2932 2933 2934 2935
		}
		goto enqueue;
	}

C
Changli Gao 已提交
2936
	sd->dropped++;
E
Eric Dumazet 已提交
2937
	rps_unlock(sd);
T
Tom Herbert 已提交
2938 2939 2940

	local_irq_restore(flags);

2941
	atomic_long_inc(&skb->dev->rx_dropped);
T
Tom Herbert 已提交
2942 2943 2944
	kfree_skb(skb);
	return NET_RX_DROP;
}

/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
2963
	int ret;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

2969
	net_timestamp_check(netdev_tstamp_prequeue, skb);

2971
	trace_netif_rx(skb);
#ifdef CONFIG_RPS
2973
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
2975 2976
		int cpu;

2977
		preempt_disable();
2978
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
2981 2982
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

2986
		rcu_read_unlock();
2987
		preempt_enable();
2988 2989
	} else
#endif
	{
		unsigned int qtail;
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
2995
	return ret;
}
EXPORT_SYMBOL(netif_rx);
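
/* Illustrative usage sketch (not part of the original file): a simple
 * non-NAPI driver feeds a received frame to the stack from its interrupt
 * handler roughly as follows; 'len' and 'rx_buf' are hypothetical.
 *
 *	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
 *	if (!skb) {
 *		dev->stats.rx_dropped++;
 *		return;
 *	}
 *	skb_reserve(skb, NET_IP_ALIGN);
 *	memcpy(skb_put(skb, len), rx_buf, len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 *
 * NAPI drivers call netif_receive_skb() from their poll handler instead.
 */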

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

3029
			WARN_ON(atomic_read(&skb->users));
3030
			trace_kfree_skb(skb, net_tx_action);
L
Linus Torvalds 已提交
3031 3032 3033 3034 3035
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
3036
		struct Qdisc *head;
L
Linus Torvalds 已提交
3037 3038 3039 3040

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
3041
		sd->output_queue_tailp = &sd->output_queue;
L
Linus Torvalds 已提交
3042 3043 3044
		local_irq_enable();

		while (head) {
3045 3046 3047
			struct Qdisc *q = head;
			spinlock_t *root_lock;

L
Linus Torvalds 已提交
3048 3049
			head = head->next_sched;

3050
			root_lock = qdisc_lock(q);
3051
			if (spin_trylock(root_lock)) {
3052 3053 3054
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
3055 3056
				qdisc_run(q);
				spin_unlock(root_lock);
L
Linus Torvalds 已提交
3057
			} else {
3058
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
3059
					      &q->state)) {
3060
					__netif_reschedule(q);
3061 3062 3063 3064 3065
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
L
Linus Torvalds 已提交
3066 3067 3068 3069 3070
			}
		}
	}
}

3071 3072
#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3073 3074 3075
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
3076
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3077
#endif
L
Linus Torvalds 已提交
3078 3079 3080 3081 3082 3083

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
 * a compare and 2 stores extra right now if we dont have it on
 * but have CONFIG_NET_CLS_ACT
L
Lucas De Marchi 已提交
3084 3085
 * NOTE: This doesn't stop any functionality; if you dont have
 * the ingress scheduler, you just can't add policies on ingress.
L
Linus Torvalds 已提交
3086 3087
 *
 */
3088
static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
L
Linus Torvalds 已提交
3089 3090
{
	struct net_device *dev = skb->dev;
3091
	u32 ttl = G_TC_RTTL(skb->tc_verd);
3092 3093
	int result = TC_ACT_OK;
	struct Qdisc *q;
3094

3095
	if (unlikely(MAX_RED_LOOP < ttl++)) {
3096 3097
		net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
				     skb->skb_iif, dev->ifindex);
3098 3099
		return TC_ACT_SHOT;
	}
L
Linus Torvalds 已提交
3100

3101 3102
	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
L
Linus Torvalds 已提交
3103

3104
	q = rxq->qdisc;
3105
	if (q != &noop_qdisc) {
3106
		spin_lock(qdisc_lock(q));
3107 3108
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
3109 3110
		spin_unlock(qdisc_lock(q));
	}
3111 3112 3113

	return result;
}
3114

3115 3116 3117 3118
static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
3119 3120 3121
	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);

	if (!rxq || rxq->qdisc == &noop_qdisc)
3122
		goto out;
L
Linus Torvalds 已提交
3123

3124 3125 3126
	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
L
Linus Torvalds 已提交
3127 3128
	}

3129
	switch (ing_filter(skb, rxq)) {
3130 3131 3132 3133 3134 3135 3136 3137 3138
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
L
Linus Torvalds 已提交
3139 3140 3141
}
#endif

3142 3143 3144 3145
/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data pointer that is used by rx handler
3147 3148 3149 3150 3151 3152
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
3153 3154
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
3155 3156
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
3159 3160 3161 3162 3163 3164
{
	ASSERT_RTNL();

	if (dev->rx_handler)
		return -EBUSY;

	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
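
/* Illustrative usage sketch (not part of the original file): a bridge-like
 * module claims a port's ingress path roughly as follows. 'my_port',
 * 'my_deliver' and 'my_handle_frame' are hypothetical names; the handler
 * follows the enum rx_handler_result semantics referenced above.
 *
 *	static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct my_port *port;
 *
 *		port = rcu_dereference(skb->dev->rx_handler_data);
 *		my_deliver(port, skb);
 *		return RX_HANDLER_CONSUMED;
 *	}
 *
 *	// under rtnl_lock():
 *	err = netdev_rx_handler_register(dev, my_handle_frame, port);
 */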

/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{

	ASSERT_RTNL();
3184 3185
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3186 3187 3188
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);

3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205
/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case __constant_htons(ETH_P_ARP):
	case __constant_htons(ETH_P_IP):
	case __constant_htons(ETH_P_IPV6):
	case __constant_htons(ETH_P_8021Q):
		return true;
	default:
		return false;
	}
}

3206
static int __netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
3209
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
3211
	struct net_device *null_or_dev;
3212
	bool deliver_exact = false;
L
Linus Torvalds 已提交
3213
	int ret = NET_RX_DROP;
A
Al Viro 已提交
3214
	__be16 type;
3215
	unsigned long pflags = current->flags;
L
Linus Torvalds 已提交
3216

3217
	net_timestamp_check(!netdev_tstamp_prequeue, skb);
3218

3219
	trace_netif_receive_skb(skb);
3220

3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232
	/*
	 * PFMEMALLOC skbs are special, they should
	 * - be delivered to SOCK_MEMALLOC sockets only
	 * - stay away from userspace
	 * - have bounded memory usage
	 *
	 * Use PF_MEMALLOC as this saves us from propagating the allocation
	 * context down to all allocation sites.
	 */
	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
		current->flags |= PF_MEMALLOC;

L
Linus Torvalds 已提交
3233
	/* if we've gotten here through NAPI, check netpoll */
3234
	if (netpoll_receive_skb(skb))
3235
		goto out;
L
Linus Torvalds 已提交
3236

J
Joe Eykholt 已提交
3237
	orig_dev = skb->dev;
3238

3239
	skb_reset_network_header(skb);
3240
	skb_reset_transport_header(skb);
3241
	skb_reset_mac_len(skb);
L
Linus Torvalds 已提交
3242 3243 3244 3245 3246

	pt_prev = NULL;

	rcu_read_lock();

3247
another_round:
3248
	skb->skb_iif = skb->dev->ifindex;
3249 3250 3251

	__this_cpu_inc(softnet_data.processed);

3252 3253 3254
	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
3255
			goto unlock;
3256 3257
	}

L
Linus Torvalds 已提交
3258 3259 3260 3261 3262 3263 3264
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

3265 3266 3267
	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
		goto skip_taps;

L
Linus Torvalds 已提交
3268
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
3269
		if (!ptype->dev || ptype->dev == skb->dev) {
3270
			if (pt_prev)
D
David S. Miller 已提交
3271
				ret = deliver_skb(skb, pt_prev, orig_dev);
L
Linus Torvalds 已提交
3272 3273 3274 3275
			pt_prev = ptype;
		}
	}

3276
skip_taps:
L
Linus Torvalds 已提交
3277
#ifdef CONFIG_NET_CLS_ACT
3278 3279
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
3280
		goto unlock;
L
Linus Torvalds 已提交
3281 3282 3283
ncls:
#endif

3284 3285 3286 3287
	if (sk_memalloc_socks() && skb_pfmemalloc(skb)
				&& !skb_pfmemalloc_protocol(skb))
		goto drop;

3288
	rx_handler = rcu_dereference(skb->dev->rx_handler);
3289 3290 3291 3292 3293
	if (vlan_tx_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
3294
		if (vlan_do_receive(&skb, !rx_handler))
3295 3296
			goto another_round;
		else if (unlikely(!skb))
3297
			goto unlock;
3298 3299
	}

3300 3301 3302 3303 3304
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
3305 3306
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
3307
			goto unlock;
3308
		case RX_HANDLER_ANOTHER:
3309
			goto another_round;
3310 3311 3312 3313 3314 3315 3316
		case RX_HANDLER_EXACT:
			deliver_exact = true;
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
3317
	}
L
Linus Torvalds 已提交
3318

3319
	/* deliver only exact match when indicated */
3320
	null_or_dev = deliver_exact ? skb->dev : NULL;
3321

L
Linus Torvalds 已提交
3322
	type = skb->protocol;
3323 3324
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3325
		if (ptype->type == type &&
3326 3327
		    (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
3328
			if (pt_prev)
D
David S. Miller 已提交
3329
				ret = deliver_skb(skb, pt_prev, orig_dev);
L
Linus Torvalds 已提交
3330 3331 3332 3333 3334
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
3335 3336 3337 3338
		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
			ret = -ENOMEM;
		else
			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
3340
drop:
3341
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

3349
unlock:
L
Linus Torvalds 已提交
3350
	rcu_read_unlock();
3351 3352
out:
	tsk_restore_flags(current, pflags, PF_MEMALLOC);
L
Linus Torvalds 已提交
3353 3354
	return ret;
}
T
Tom Herbert 已提交
3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372

/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
3373
	net_timestamp_check(netdev_tstamp_prequeue, skb);
E
Eric Dumazet 已提交
3374

3375 3376 3377
	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

E
Eric Dumazet 已提交
3378
#ifdef CONFIG_RPS
3379
	if (static_key_false(&rps_needed)) {
E
Eric Dumazet 已提交
3380 3381
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu, ret;
T
Tom Herbert 已提交
3382

E
Eric Dumazet 已提交
3383 3384 3385
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
T
Tom Herbert 已提交
3386

E
Eric Dumazet 已提交
3387 3388 3389
		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
3390
			return ret;
E
Eric Dumazet 已提交
3391
		}
3392
		rcu_read_unlock();
T
Tom Herbert 已提交
3393
	}
3394
#endif
3395
	return __netif_receive_skb(skb);
T
Tom Herbert 已提交
3396
}
E
Eric Dumazet 已提交
3397
EXPORT_SYMBOL(netif_receive_skb);
L
Linus Torvalds 已提交
3398

E
Eric Dumazet 已提交
3399 3400 3401
/* Network device is going away, flush any packets still pending
 * Called with irqs disabled.
 */
3402
static void flush_backlog(void *arg)
3403
{
3404
	struct net_device *dev = arg;
E
Eric Dumazet 已提交
3405
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
3406 3407
	struct sk_buff *skb, *tmp;

E
Eric Dumazet 已提交
3408
	rps_lock(sd);
3409
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3410
		if (skb->dev == dev) {
E
Eric Dumazet 已提交
3411
			__skb_unlink(skb, &sd->input_pkt_queue);
3412
			kfree_skb(skb);
3413
			input_queue_head_incr(sd);
3414
		}
3415
	}
E
Eric Dumazet 已提交
3416
	rps_unlock(sd);
3417 3418 3419 3420 3421

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
3422
			input_queue_head_incr(sd);
3423 3424
		}
	}
3425 3426
}

3427 3428 3429 3430 3431 3432 3433
static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int err = -ENOENT;

3434 3435
	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
3436
		goto out;
3437
	}
3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_complete)
			continue;

		err = ptype->gro_complete(skb);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb(skb);
}

E
Eric Dumazet 已提交
3459
inline void napi_gro_flush(struct napi_struct *napi)
3460 3461 3462 3463 3464 3465 3466 3467 3468
{
	struct sk_buff *skb, *next;

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		napi_gro_complete(skb);
	}

3469
	napi->gro_count = 0;
3470 3471
	napi->gro_list = NULL;
}
E
Eric Dumazet 已提交
3472
EXPORT_SYMBOL(napi_gro_flush);
3473

3474
enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int same_flow;
	int mac_len;
	enum gro_result ret;

	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
		goto normal;

	if (skb_is_gso(skb) || skb_has_frag_list(skb))
		goto normal;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		mac_len = skb->network_header - skb->mac_header;
		skb->mac_len = mac_len;
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;

		pp = ptype->gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
		goto normal;

	napi->gro_count++;
	NAPI_GRO_CB(skb)->count = 1;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	if (skb_headlen(skb) < skb_gro_offset(skb)) {
		int grow = skb_gro_offset(skb) - skb_headlen(skb);

		BUG_ON(skb->end - skb->tail < grow);

		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

		skb->tail += grow;
		skb->data_len -= grow;

		skb_shinfo(skb)->frags[0].page_offset += grow;
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);

		if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
			skb_frag_unref(skb, 0);
			memmove(skb_shinfo(skb)->frags,
				skb_shinfo(skb)->frags + 1,
				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
		}
	}

ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}
EXPORT_SYMBOL(dev_gro_receive);

static inline gro_result_t
__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;
	unsigned int maclen = skb->dev->hard_header_len;

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_gro_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_gro_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
		NAPI_GRO_CB(p)->flush = 0;
	}

	return dev_gro_receive(napi, skb);
}

gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		kfree_skb(skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			kmem_cache_free(skbuff_head_cache, skb);
		else
			__kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
		break;
	}

	return ret;
}
EXPORT_SYMBOL(napi_skb_finish);

void skb_gro_reset_offset(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb->mac_header == skb->tail &&
	    !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
		NAPI_GRO_CB(skb)->frag0 =
			skb_frag_address(&skb_shinfo(skb)->frags[0]);
		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
	}
}
EXPORT_SYMBOL(skb_gro_reset_offset);

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_gro_reset_offset(skb);

	return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);

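/* Illustrative sketch (not part of this file): a GRO-capable driver calls
 * napi_gro_receive() from its ->poll() callback instead of calling
 * netif_receive_skb() directly, so that consecutive packets of one flow can
 * be merged before the protocol layers see them.  "priv" is hypothetical:
 *
 *	skb->protocol = eth_type_trans(skb, priv->dev);
 *	napi_gro_receive(&priv->napi, skb);
 *
 * Packets that cannot be merged fall back to the normal receive path via
 * GRO_NORMAL, so no separate netif_receive_skb() call is needed.
 */
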
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	skb->vlan_tci = 0;
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
		if (skb)
			napi->skb = skb;
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
			       gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		skb->protocol = eth_type_trans(skb, skb->dev);

		if (ret == GRO_HELD)
			skb_gro_pull(skb, -ETH_HLEN);
		else if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
		break;
	}

	return ret;
}
EXPORT_SYMBOL(napi_frags_finish);

static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	struct ethhdr *eth;
	unsigned int hlen;
	unsigned int off;

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*eth);
	eth = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		eth = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!eth)) {
			napi_reuse_skb(napi, skb);
			skb = NULL;
			goto out;
		}
	}

	skb_gro_pull(skb, sizeof(*eth));

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.  We'll fix it up properly at the end.
	 */
	skb->protocol = eth->h_proto;

out:
	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);

/*
 * net_rps_action sends any pending IPI's for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPI's to kick RPS processing on remote cpus. */
		while (remsd) {
			struct softnet_data *next = remsd->rps_ipi_next;

			if (cpu_online(remsd->cpu))
				__smp_call_function_single(remsd->cpu,
							   &remsd->csd, 0);
			remsd = next;
		}
	} else
#endif
		local_irq_enable();
}

static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);

#ifdef CONFIG_RPS
	/* Check if we have pending IPIs; it's better to send them now
	 * rather than waiting for net_rx_action() to end.
	 */
	if (sd->rps_ipi_list) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}
#endif
	napi->weight = weight_p;
	local_irq_disable();
	while (work < quota) {
		struct sk_buff *skb;
		unsigned int qlen;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			local_irq_enable();
			__netif_receive_skb(skb);
			local_irq_disable();
			input_queue_head_incr(sd);
			if (++work >= quota) {
				local_irq_enable();
				return work;
			}
		}

		rps_lock(sd);
		qlen = skb_queue_len(&sd->input_pkt_queue);
		if (qlen)
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);

		if (qlen < quota - work) {
			/*
			 * Inline a custom version of __napi_complete().
			 * Only the current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
			 * We can use a plain write instead of clear_bit(),
			 * and we don't need an smp_mb() memory barrier.
			 */
			list_del(&napi->poll_list);
			napi->state = 0;

			quota = work + qlen;
		}
		rps_unlock(sd);
	}
	local_irq_enable();

	return work;
}

/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(&__get_cpu_var(softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

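/* Illustrative sketch (not part of this file): drivers normally go through
 * napi_schedule()/napi_schedule_prep() rather than calling __napi_schedule()
 * directly.  A typical RX interrupt handler (all "foo_*" names hypothetical)
 * masks further device interrupts and defers the work to the poll loop:
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_priv *priv = dev_id;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			foo_disable_rx_irq(priv);
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */
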
void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	BUG_ON(n->gro_list);

	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);

void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	/*
	 * don't let napi dequeue from the cpu poll list
	 * just in case its running on a different cpu
	 */
	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
		return;

	napi_gro_flush(n);
	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(napi_complete);

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
EXPORT_SYMBOL(netif_napi_add);
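
/* Illustrative sketch (not part of this file): a driver registers its NAPI
 * context once at probe time and completes it from the poll callback when a
 * poll drains less than its budget.  The "foo_*" helpers and the weight of
 * 64 are hypothetical:
 *
 *	netif_napi_add(dev, &priv->napi, foo_poll, 64);
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int work = foo_clean_rx_ring(priv, budget);
 *
 *		if (work < budget) {
 *			napi_complete(napi);
 *			foo_enable_rx_irq(priv);
 *		}
 *		return work;
 *	}
 */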

void netif_napi_del(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		kfree_skb(skb);
	}

	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);

static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	unsigned long time_limit = jiffies + 2;
	int budget = netdev_budget;
	void *have;

	local_irq_disable();

	while (!list_empty(&sd->poll_list)) {
		struct napi_struct *n;
		int work, weight;

		/* If the softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
			goto softnet_break;

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);

		have = netpoll_poll_lock(n);

		weight = n->weight;

		/* This NAPI_STATE_SCHED test is for avoiding a race
		 * with netpoll's poll_napi().  Only the entity which
		 * obtains the lock and sees NAPI_STATE_SCHED set will
		 * actually make the ->poll() call.  Therefore we avoid
		 * accidentally calling ->poll() when NAPI is not scheduled.
		 */
		work = 0;
		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
			work = n->poll(n, weight);
			trace_napi_poll(n);
		}

		WARN_ON_ONCE(work > weight);

		budget -= work;

		local_irq_disable();

		/* Drivers must not modify the NAPI state if they
		 * consume the entire weight.  In such cases this code
		 * still "owns" the NAPI instance and therefore can
		 * move the instance around on the list at-will.
		 */
		if (unlikely(work == weight)) {
			if (unlikely(napi_disable_pending(n))) {
				local_irq_enable();
				napi_complete(n);
				local_irq_disable();
			} else
				list_move_tail(&n->poll_list, &sd->poll_list);
		}

		netpoll_poll_unlock(have);
	}
out:
	net_rps_action_and_irq_enable(sd);

#ifdef CONFIG_NET_DMA
	/*
	 * There may not be any more sk_buffs coming right now, so push
	 * any pending DMA copies to hardware
	 */
	dma_issue_pending_all();
#endif

	return;

softnet_break:
	sd->time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}

static gifconf_func_t *gifconf_list[NPROTO];

/**
 *	register_gifconf	-	register a SIOCGIF handler
 *	@family: Address family
 *	@gifconf: Function handler
 *
 *	Register protocol dependent address dumping routines. The handler
 *	that is passed must not be freed or reused until it has been replaced
 *	by another handler.
 */
int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
{
	if (family >= NPROTO)
		return -EINVAL;
	gifconf_list[family] = gifconf;
	return 0;
}
EXPORT_SYMBOL(register_gifconf);
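
/* Illustrative sketch (not part of this file): an address family that wants
 * its addresses reported through SIOCGIFCONF registers a dump handler once
 * at init time.  "foo_gifconf" is hypothetical; it must match the
 * gifconf_func_t signature (device, user buffer, remaining length):
 *
 *	register_gifconf(PF_INET, foo_gifconf);
 */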


/*
 *	Map an interface index to its name (SIOCGIFNAME)
 */

/*
 *	We need this ioctl for efficient implementation of the
 *	if_indextoname() function required by the IPv6 API.  Without
 *	it, we would have to search all the interfaces to find a
 *	match.  --pb
 */

4026
static int dev_ifname(struct net *net, struct ifreq __user *arg)
L
Linus Torvalds 已提交
4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037
{
	struct net_device *dev;
	struct ifreq ifr;

	/*
	 *	Fetch the caller's info block.
	 */

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

4038 4039
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
L
Linus Torvalds 已提交
4040
	if (!dev) {
4041
		rcu_read_unlock();
L
Linus Torvalds 已提交
4042 4043 4044 4045
		return -ENODEV;
	}

	strcpy(ifr.ifr_name, dev->name);
4046
	rcu_read_unlock();
L
Linus Torvalds 已提交
4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058

	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
		return -EFAULT;
	return 0;
}

/*
 *	Perform a SIOCGIFCONF call. This structure will change
 *	size eventually, and there is nothing I can do about it.
 *	Thus we will need a 'compatibility mode'.
 */

4059
static int dev_ifconf(struct net *net, char __user *arg)
L
Linus Torvalds 已提交
4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082
{
	struct ifconf ifc;
	struct net_device *dev;
	char __user *pos;
	int len;
	int total;
	int i;

	/*
	 *	Fetch the caller's info block.
	 */

	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
		return -EFAULT;

	pos = ifc.ifc_buf;
	len = ifc.ifc_len;

	/*
	 *	Loop over the interfaces, and write an info block for each.
	 */

	total = 0;
4083
	for_each_netdev(net, dev) {
L
Linus Torvalds 已提交
4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096
		for (i = 0; i < NPROTO; i++) {
			if (gifconf_list[i]) {
				int done;
				if (!pos)
					done = gifconf_list[i](dev, NULL, 0);
				else
					done = gifconf_list[i](dev, pos + total,
							       len - total);
				if (done < 0)
					return -EFAULT;
				total += done;
			}
		}
4097
	}
L
Linus Torvalds 已提交
4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110

	/*
	 *	All done.  Write the updated control block back to the caller.
	 */
	ifc.ifc_len = total;

	/*
	 * 	Both BSD and Solaris return 0 here, so we do too.
	 */
	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
}

#ifdef CONFIG_PROC_FS
4111

E
Eric Dumazet 已提交
4112
#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
4113 4114 4115 4116 4117

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

E
Eric Dumazet 已提交
4118
static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
4119 4120 4121 4122 4123
{
	struct net *net = seq_file_net(seq);
	struct net_device *dev;
	struct hlist_node *p;
	struct hlist_head *h;
E
Eric Dumazet 已提交
4124
	unsigned int count = 0, offset = get_offset(*pos);
4125

E
Eric Dumazet 已提交
4126
	h = &net->dev_name_head[get_bucket(*pos)];
4127
	hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
E
Eric Dumazet 已提交
4128
		if (++count == offset)
4129 4130 4131 4132 4133 4134
			return dev;
	}

	return NULL;
}

E
Eric Dumazet 已提交
4135
static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
4136 4137 4138 4139 4140
{
	struct net_device *dev;
	unsigned int bucket;

	do {
E
Eric Dumazet 已提交
4141
		dev = dev_from_same_bucket(seq, pos);
4142 4143 4144
		if (dev)
			return dev;

E
Eric Dumazet 已提交
4145 4146
		bucket = get_bucket(*pos) + 1;
		*pos = set_bucket_offset(bucket, 1);
4147 4148 4149 4150 4151
	} while (bucket < NETDEV_HASHENTRIES);

	return NULL;
}

L
Linus Torvalds 已提交
4152 4153 4154 4155
/*
 *	This is invoked by the /proc filesystem handler to display a device
 *	in detail.
 */
4156
void *dev_seq_start(struct seq_file *seq, loff_t *pos)
4157
	__acquires(RCU)
L
Linus Torvalds 已提交
4158
{
4159
	rcu_read_lock();
4160 4161
	if (!*pos)
		return SEQ_START_TOKEN;
L
Linus Torvalds 已提交
4162

E
Eric Dumazet 已提交
4163
	if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
4164
		return NULL;
L
Linus Torvalds 已提交
4165

E
Eric Dumazet 已提交
4166
	return dev_from_bucket(seq, pos);
L
Linus Torvalds 已提交
4167 4168 4169 4170
}

void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
4171
	++*pos;
E
Eric Dumazet 已提交
4172
	return dev_from_bucket(seq, pos);
L
Linus Torvalds 已提交
4173 4174 4175
}

void dev_seq_stop(struct seq_file *seq, void *v)
4176
	__releases(RCU)
L
Linus Torvalds 已提交
4177
{
4178
	rcu_read_unlock();
L
Linus Torvalds 已提交
4179 4180 4181 4182
}

static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
4183 4184
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
L
Linus Torvalds 已提交
4185

4186 4187
	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202
		   dev->name, stats->rx_bytes, stats->rx_packets,
		   stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors +
		    stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->rx_compressed, stats->multicast,
		   stats->tx_bytes, stats->tx_packets,
		   stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors +
		    stats->tx_aborted_errors +
		    stats->tx_window_errors +
		    stats->tx_heartbeat_errors,
		   stats->tx_compressed);
L
Linus Torvalds 已提交
4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221
}

/*
 *	Called from the PROCfs module. This now uses the new arbitrary sized
 *	/proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}

C
Changli Gao 已提交
4222
static struct softnet_data *softnet_get_online(loff_t *pos)
L
Linus Torvalds 已提交
4223
{
C
Changli Gao 已提交
4224
	struct softnet_data *sd = NULL;
L
Linus Torvalds 已提交
4225

4226
	while (*pos < nr_cpu_ids)
4227
		if (cpu_online(*pos)) {
C
Changli Gao 已提交
4228
			sd = &per_cpu(softnet_data, *pos);
L
Linus Torvalds 已提交
4229 4230 4231
			break;
		} else
			++*pos;
C
Changli Gao 已提交
4232
	return sd;
L
Linus Torvalds 已提交
4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

static int softnet_seq_show(struct seq_file *seq, void *v)
{
C
Changli Gao 已提交
4252
	struct softnet_data *sd = v;
L
Linus Torvalds 已提交
4253

T
Tom Herbert 已提交
4254
	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
C
Changli Gao 已提交
4255
		   sd->processed, sd->dropped, sd->time_squeeze, 0,
4256
		   0, 0, 0, 0, /* was fastroute */
C
Changli Gao 已提交
4257
		   sd->cpu_collision, sd->received_rps);
L
Linus Torvalds 已提交
4258 4259 4260
	return 0;
}

4261
static const struct seq_operations dev_seq_ops = {
L
Linus Torvalds 已提交
4262 4263 4264 4265 4266 4267 4268 4269
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
};

static int dev_seq_open(struct inode *inode, struct file *file)
{
4270
	return seq_open_net(inode, file, &dev_seq_ops,
E
Eric Dumazet 已提交
4271
			    sizeof(struct seq_net_private));
4272 4273
}

4274
static const struct file_operations dev_seq_fops = {
L
Linus Torvalds 已提交
4275 4276 4277 4278
	.owner	 = THIS_MODULE,
	.open    = dev_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
4279
	.release = seq_release_net,
L
Linus Torvalds 已提交
4280 4281
};

4282
static const struct seq_operations softnet_seq_ops = {
L
Linus Torvalds 已提交
4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
};

static int softnet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &softnet_seq_ops);
}

4294
static const struct file_operations softnet_seq_fops = {
L
Linus Torvalds 已提交
4295 4296 4297 4298 4299 4300 4301
	.owner	 = THIS_MODULE,
	.open    = softnet_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313
static void *ptype_get_idx(loff_t pos)
{
	struct packet_type *pt = NULL;
	loff_t i = 0;
	int t;

	list_for_each_entry_rcu(pt, &ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

4314
	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
4315 4316 4317 4318 4319 4320 4321 4322 4323 4324
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
4325
	__acquires(RCU)
4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(0);

	pt = v;
	nxt = pt->list.next;
	if (pt->type == htons(ETH_P_ALL)) {
		if (nxt != &ptype_all)
			goto found;
		hash = 0;
		nxt = ptype_base[0].next;
	} else
4349
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
4350 4351

	while (nxt == &ptype_base[hash]) {
4352
		if (++hash >= PTYPE_HASH_SIZE)
4353 4354 4355 4356 4357 4358 4359 4360
			return NULL;
		nxt = ptype_base[hash].next;
	}
found:
	return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
4361
	__releases(RCU)
4362 4363 4364 4365 4366 4367 4368 4369 4370 4371
{
	rcu_read_unlock();
}

static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct packet_type *pt = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Type Device      Function\n");
4372
	else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
4373 4374 4375 4376 4377
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

A
Alexey Dobriyan 已提交
4378 4379
		seq_printf(seq, " %-8s %pF\n",
			   pt->dev ? pt->dev->name : "", pt->func);
4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393
	}

	return 0;
}

static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next  = ptype_seq_next,
	.stop  = ptype_seq_stop,
	.show  = ptype_seq_show,
};

static int ptype_seq_open(struct inode *inode, struct file *file)
{
4394 4395
	return seq_open_net(inode, file, &ptype_seq_ops,
			sizeof(struct seq_net_private));
4396 4397 4398 4399 4400 4401 4402
}

static const struct file_operations ptype_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = ptype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
4403
	.release = seq_release_net,
4404 4405 4406
};


4407
static int __net_init dev_proc_net_init(struct net *net)
L
Linus Torvalds 已提交
4408 4409 4410
{
	int rc = -ENOMEM;

4411
	if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
L
Linus Torvalds 已提交
4412
		goto out;
4413
	if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
L
Linus Torvalds 已提交
4414
		goto out_dev;
4415
	if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
4416
		goto out_softnet;
4417

4418
	if (wext_proc_init(net))
4419
		goto out_ptype;
L
Linus Torvalds 已提交
4420 4421 4422
	rc = 0;
out:
	return rc;
4423
out_ptype:
4424
	proc_net_remove(net, "ptype");
L
Linus Torvalds 已提交
4425
out_softnet:
4426
	proc_net_remove(net, "softnet_stat");
L
Linus Torvalds 已提交
4427
out_dev:
4428
	proc_net_remove(net, "dev");
L
Linus Torvalds 已提交
4429 4430
	goto out;
}
4431

4432
static void __net_exit dev_proc_net_exit(struct net *net)
4433 4434 4435 4436 4437 4438 4439 4440
{
	wext_proc_exit(net);

	proc_net_remove(net, "ptype");
	proc_net_remove(net, "softnet_stat");
	proc_net_remove(net, "dev");
}

4441
static struct pernet_operations __net_initdata dev_proc_ops = {
4442 4443 4444 4445 4446 4447 4448 4449
	.init = dev_proc_net_init,
	.exit = dev_proc_net_exit,
};

static int __init dev_proc_init(void)
{
	return register_pernet_subsys(&dev_proc_ops);
}
L
Linus Torvalds 已提交
4450 4451 4452 4453 4454 4455
#else
#define dev_proc_init() 0
#endif	/* CONFIG_PROC_FS */


/**
J
Jiri Pirko 已提交
4456
 *	netdev_set_master	-	set up master pointer
L
Linus Torvalds 已提交
4457 4458 4459 4460 4461 4462
 *	@slave: slave device
 *	@master: new master device
 *
 *	Changes the master device of the slave. Pass %NULL to break the
 *	bonding. The caller must hold the RTNL semaphore. On a failure
 *	a negative errno code is returned. On success the reference counts
J
Jiri Pirko 已提交
4463
 *	are adjusted and the function returns zero.
L
Linus Torvalds 已提交
4464 4465 4466 4467 4468 4469 4470 4471 4472 4473 4474 4475 4476 4477
 */
int netdev_set_master(struct net_device *slave, struct net_device *master)
{
	struct net_device *old = slave->master;

	ASSERT_RTNL();

	if (master) {
		if (old)
			return -EBUSY;
		dev_hold(master);
	}

	slave->master = master;
4478

4479
	if (old)
L
Linus Torvalds 已提交
4480
		dev_put(old);
J
Jiri Pirko 已提交
4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503
	return 0;
}
EXPORT_SYMBOL(netdev_set_master);

/**
 *	netdev_set_bond_master	-	set up bonding master/slave pair
 *	@slave: slave device
 *	@master: new master device
 *
 *	Changes the master device of the slave. Pass %NULL to break the
 *	bonding. The caller must hold the RTNL semaphore. On a failure
 *	a negative errno code is returned. On success %RTM_NEWLINK is sent
 *	to the routing socket and the function returns zero.
 */
int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
{
	int err;

	ASSERT_RTNL();

	err = netdev_set_master(slave, master);
	if (err)
		return err;
L
Linus Torvalds 已提交
4504 4505 4506 4507 4508 4509 4510 4511
	if (master)
		slave->flags |= IFF_SLAVE;
	else
		slave->flags &= ~IFF_SLAVE;

	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
	return 0;
}
J
Jiri Pirko 已提交
4512
EXPORT_SYMBOL(netdev_set_bond_master);
L
Linus Torvalds 已提交
4513

4514 4515
static void dev_change_rx_flags(struct net_device *dev, int flags)
{
4516 4517 4518 4519
	const struct net_device_ops *ops = dev->netdev_ops;

	if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
4520 4521
}

4522
static int __dev_set_promiscuity(struct net_device *dev, int inc)
L
Linus Torvalds 已提交
4523
{
4524
	unsigned int old_flags = dev->flags;
4525 4526
	kuid_t uid;
	kgid_t gid;
L
Linus Torvalds 已提交
4527

4528 4529
	ASSERT_RTNL();

4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540
	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
4541 4542
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
4543 4544 4545
			return -EOVERFLOW;
		}
	}
4546
	if (dev->flags != old_flags) {
4547 4548 4549
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
4550 4551
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
4552 4553 4554 4555 4556 4557
			audit_log(current->audit_context, GFP_ATOMIC,
				AUDIT_ANOM_PROMISCUOUS,
				"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				dev->name, (dev->flags & IFF_PROMISC),
				(old_flags & IFF_PROMISC),
				audit_get_loginuid(current),
4558 4559
				from_kuid(&init_user_ns, uid),
				from_kgid(&init_user_ns, gid),
4560
				audit_get_sessionid(current));
4561
		}
4562

4563
		dev_change_rx_flags(dev, IFF_PROMISC);
L
Linus Torvalds 已提交
4564
	}
4565
	return 0;
L
Linus Torvalds 已提交
4566 4567
}

4568 4569 4570 4571 4572 4573 4574 4575 4576
/**
 *	dev_set_promiscuity	- update promiscuity count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove promiscuity from a device. While the count in the device
 *	remains above zero the interface remains promiscuous. Once it hits zero
 *	the device reverts back to normal filtering operation. A negative inc
 *	value is used to drop promiscuity on the device.
 *	Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
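
/* Illustrative sketch (not part of this file): a feature that needs to see
 * all frames on a port (a packet tap, a bridge port, ...) holds a promiscuity
 * reference while active and drops it when done, with each call made under
 * RTNL:
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);
 *	rtnl_unlock();
 */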
/**
 *	dev_set_allmulti	- update allmulti count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove reception of all multicast frames to a device. While the
 *	count in the device remains above zero the interface remains listening
 *	to all interfaces. Once it hits zero the device reverts back to normal
 *	filtering operation. A negative @inc value is used to drop the counter
 *	when releasing a resource needing all multicasts.
4603
 *	Return 0 if successful or a negative errno code on error.
L
Linus Torvalds 已提交
4604 4605
 */

4606
int dev_set_allmulti(struct net_device *dev, int inc)
L
Linus Torvalds 已提交
4607
{
4608
	unsigned int old_flags = dev->flags;
L
Linus Torvalds 已提交
4609

4610 4611
	ASSERT_RTNL();

L
Linus Torvalds 已提交
4612
	dev->flags |= IFF_ALLMULTI;
4613 4614 4615 4616 4617 4618 4619 4620 4621 4622
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
4623 4624
			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
				dev->name);
4625 4626 4627
			return -EOVERFLOW;
		}
	}
4628
	if (dev->flags ^ old_flags) {
4629
		dev_change_rx_flags(dev, IFF_ALLMULTI);
4630
		dev_set_rx_mode(dev);
4631
	}
4632
	return 0;
4633
}
E
Eric Dumazet 已提交
4634
EXPORT_SYMBOL(dev_set_allmulti);
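
/* Illustrative sketch (not part of this file): reception of all multicast
 * frames is reference counted in the same way as promiscuity above; a user
 * takes a reference while it needs the traffic and releases it afterwards,
 * under RTNL:
 *
 *	rtnl_lock();
 *	err = dev_set_allmulti(dev, 1);
 *	rtnl_unlock();
 */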

/*
 *	Upload unicast and multicast address lists to device and
 *	configure RX filtering. When the device doesn't support unicast
J
Joe Perches 已提交
4639
 *	filtering it is put in promiscuous mode while unicast addresses
4640 4641 4642 4643
 *	are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
4644 4645
	const struct net_device_ops *ops = dev->netdev_ops;

4646 4647 4648 4649 4650
	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags&IFF_UP))
		return;

	if (!netif_device_present(dev))
4651
		return;
4652

4653
	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4654 4655 4656
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
4657
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4658
			__dev_set_promiscuity(dev, 1);
4659
			dev->uc_promisc = true;
4660
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4661
			__dev_set_promiscuity(dev, -1);
4662
			dev->uc_promisc = false;
4663 4664
		}
	}
4665 4666 4667

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
4668 4669 4670 4671
}

void dev_set_rx_mode(struct net_device *dev)
{
4672
	netif_addr_lock_bh(dev);
4673
	__dev_set_rx_mode(dev);
4674
	netif_addr_unlock_bh(dev);
L
Linus Torvalds 已提交
4675 4676
}

4677 4678 4679 4680 4681 4682
/**
 *	dev_get_flags - get flags reported to userspace
 *	@dev: device
 *
 *	Get the combination of flag bits exported through APIs to userspace.
 */
4683
unsigned int dev_get_flags(const struct net_device *dev)
L
Linus Torvalds 已提交
4684
{
4685
	unsigned int flags;
L
Linus Torvalds 已提交
4686 4687 4688

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
S
Stefan Rompf 已提交
4689 4690 4691
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
L
Linus Torvalds 已提交
4692 4693 4694
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

S
Stefan Rompf 已提交
4695 4696 4697 4698 4699 4700 4701 4702
	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}
L
Linus Torvalds 已提交
4703 4704 4705

	return flags;
}
E
Eric Dumazet 已提交
4706
EXPORT_SYMBOL(dev_get_flags);
L
Linus Torvalds 已提交
4707

4708
int __dev_change_flags(struct net_device *dev, unsigned int flags)
L
Linus Torvalds 已提交
4709
{
4710
	unsigned int old_flags = dev->flags;
4711
	int ret;
L
Linus Torvalds 已提交
4712

4713 4714
	ASSERT_RTNL();

L
Linus Torvalds 已提交
4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728
	/*
	 *	Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 *	Load in the correct multicast list now the flags have changed.
	 */

4729 4730
	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);
4731

4732
	dev_set_rx_mode(dev);
L
Linus Torvalds 已提交
4733 4734 4735 4736 4737 4738 4739 4740 4741

	/*
	 *	Have we downed the interface. We handle IFF_UP ourselves
	 *	according to user attempts to set it, rather than blindly
	 *	setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different  ? */
4742
		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
L
Linus Torvalds 已提交
4743 4744

		if (!ret)
4745
			dev_set_rx_mode(dev);
L
Linus Torvalds 已提交
4746 4747 4748
	}

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
E
Eric Dumazet 已提交
4749 4750
		int inc = (flags & IFF_PROMISC) ? 1 : -1;

L
Linus Torvalds 已提交
4751 4752 4753 4754 4755 4756 4757 4758 4759
		dev->gflags ^= IFF_PROMISC;
		dev_set_promiscuity(dev, inc);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	   is important. Some (broken) drivers set IFF_PROMISC, when
	   IFF_ALLMULTI is requested not asking us and not reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
E
Eric Dumazet 已提交
4760 4761
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

L
Linus Torvalds 已提交
4762 4763 4764 4765
		dev->gflags ^= IFF_ALLMULTI;
		dev_set_allmulti(dev, inc);
	}

4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792
	return ret;
}

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
{
	unsigned int changes = dev->flags ^ old_flags;

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
		else
			call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
}

/**
 *	dev_change_flags - change device settings
 *	@dev: device
 *	@flags: device state flags
 *
 *	Change settings on device based state flags. The flags are
 *	in the userspace exported format.
 */
4793
int dev_change_flags(struct net_device *dev, unsigned int flags)
4794
{
4795 4796
	int ret;
	unsigned int changes, old_flags = dev->flags;
4797 4798 4799 4800 4801 4802

	ret = __dev_change_flags(dev, flags);
	if (ret < 0)
		return ret;

	changes = old_flags ^ dev->flags;
4803 4804
	if (changes)
		rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
L
Linus Torvalds 已提交
4805

4806
	__dev_notify_flags(dev, old_flags);
L
Linus Torvalds 已提交
4807 4808
	return ret;
}
E
Eric Dumazet 已提交
4809
EXPORT_SYMBOL(dev_change_flags);
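
/* Illustrative sketch (not part of this file): bringing an interface up from
 * kernel code is just a flag change through this helper, done under RTNL so
 * that the notifier chain and the rtnetlink message fire consistently:
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev->flags | IFF_UP);
 *	rtnl_unlock();
 */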

4811 4812 4813 4814 4815 4816 4817
/**
 *	dev_set_mtu - Change maximum transfer unit
 *	@dev: device
 *	@new_mtu: new transfer unit
 *
 *	Change the maximum transfer size of the network device.
 */
L
Linus Torvalds 已提交
4818 4819
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
4820
	const struct net_device_ops *ops = dev->netdev_ops;
L
Linus Torvalds 已提交
4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833
	int err;

	if (new_mtu == dev->mtu)
		return 0;

	/*	MTU must be positive.	 */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = 0;
4834 4835
	if (ops->ndo_change_mtu)
		err = ops->ndo_change_mtu(dev, new_mtu);
L
Linus Torvalds 已提交
4836 4837
	else
		dev->mtu = new_mtu;
4838

L
Linus Torvalds 已提交
4839
	if (!err && dev->flags & IFF_UP)
4840
		call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
L
Linus Torvalds 已提交
4841 4842
	return err;
}
E
Eric Dumazet 已提交
4843
EXPORT_SYMBOL(dev_set_mtu);
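
/* Illustrative sketch (not part of this file): callers change the MTU through
 * this helper rather than writing dev->mtu directly, so the driver's
 * ndo_change_mtu hook and the NETDEV_CHANGEMTU notifier both run.  The value
 * 9000 is just an example jumbo-frame MTU:
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);
 *	rtnl_unlock();
 */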

4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855
/**
 *	dev_set_group - Change group this device belongs to
 *	@dev: device
 *	@new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);

4856 4857 4858 4859 4860 4861 4862
/**
 *	dev_set_mac_address - Change Media Access Control Address
 *	@dev: device
 *	@sa: new address
 *
 *	Change the hardware (MAC) address of the device
 */
L
Linus Torvalds 已提交
4863 4864
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
4865
	const struct net_device_ops *ops = dev->netdev_ops;
L
Linus Torvalds 已提交
4866 4867
	int err;

4868
	if (!ops->ndo_set_mac_address)
L
Linus Torvalds 已提交
4869 4870 4871 4872 4873
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
4874
	err = ops->ndo_set_mac_address(dev, sa);
L
Linus Torvalds 已提交
4875
	if (!err)
4876
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4877
	add_device_randomness(dev->dev_addr, dev->addr_len);
L
Linus Torvalds 已提交
4878 4879
	return err;
}
E
Eric Dumazet 已提交
4880
EXPORT_SYMBOL(dev_set_mac_address);
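
/* Illustrative sketch (not part of this file): changing the hardware address
 * from kernel code fills in a struct sockaddr whose family must match
 * dev->type (ARPHRD_ETHER for Ethernet) and calls the helper under RTNL.
 * "new_mac" stands for a hypothetical buffer of ETH_ALEN bytes:
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_mac, ETH_ALEN);
 *	rtnl_lock();
 *	err = dev_set_mac_address(dev, &sa);
 *	rtnl_unlock();
 */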

/*
E
Eric Dumazet 已提交
4883
 *	Perform the SIOCxIFxxx calls, inside rcu_read_lock()
L
Linus Torvalds 已提交
4884
 */
4885
static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
L
Linus Torvalds 已提交
4886 4887
{
	int err;
E
Eric Dumazet 已提交
4888
	struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
L
Linus Torvalds 已提交
4889 4890 4891 4892 4893

	if (!dev)
		return -ENODEV;

	switch (cmd) {
E
Eric Dumazet 已提交
4894 4895 4896
	case SIOCGIFFLAGS:	/* Get interface flags */
		ifr->ifr_flags = (short) dev_get_flags(dev);
		return 0;
L
Linus Torvalds 已提交
4897

E
Eric Dumazet 已提交
4898 4899 4900 4901
	case SIOCGIFMETRIC:	/* Get the metric on the interface
				   (currently unused) */
		ifr->ifr_metric = 0;
		return 0;
L
Linus Torvalds 已提交
4902

E
Eric Dumazet 已提交
4903 4904 4905
	case SIOCGIFMTU:	/* Get the MTU of a device */
		ifr->ifr_mtu = dev->mtu;
		return 0;
L
Linus Torvalds 已提交
4906

E
Eric Dumazet 已提交
4907 4908 4909 4910 4911 4912 4913 4914
	case SIOCGIFHWADDR:
		if (!dev->addr_len)
			memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
		else
			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
		ifr->ifr_hwaddr.sa_family = dev->type;
		return 0;
L
Linus Torvalds 已提交
4915

E
Eric Dumazet 已提交
4916 4917 4918
	case SIOCGIFSLAVE:
		err = -EINVAL;
		break;
4919

E
Eric Dumazet 已提交
4920 4921 4922 4923 4924 4925 4926 4927
	case SIOCGIFMAP:
		ifr->ifr_map.mem_start = dev->mem_start;
		ifr->ifr_map.mem_end   = dev->mem_end;
		ifr->ifr_map.base_addr = dev->base_addr;
		ifr->ifr_map.irq       = dev->irq;
		ifr->ifr_map.dma       = dev->dma;
		ifr->ifr_map.port      = dev->if_port;
		return 0;
4928

E
Eric Dumazet 已提交
4929 4930 4931
	case SIOCGIFINDEX:
		ifr->ifr_ifindex = dev->ifindex;
		return 0;
4932

E
Eric Dumazet 已提交
4933 4934 4935
	case SIOCGIFTXQLEN:
		ifr->ifr_qlen = dev->tx_queue_len;
		return 0;
4936

E
Eric Dumazet 已提交
4937 4938 4939 4940 4941
	default:
		/* dev_ioctl() should ensure this case
		 * is never reached
		 */
		WARN_ON(1);
4942
		err = -ENOTTY;
E
Eric Dumazet 已提交
4943
		break;
4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955

	}
	return err;
}

/*
 *	Perform the SIOCxIFxxx calls, inside rtnl_lock()
 */
static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
J
Jarek Poplawski 已提交
4956
	const struct net_device_ops *ops;
4957 4958 4959 4960

	if (!dev)
		return -ENODEV;

J
Jarek Poplawski 已提交
4961 4962
	ops = dev->netdev_ops;

4963
	switch (cmd) {
E
Eric Dumazet 已提交
4964 4965
	case SIOCSIFFLAGS:	/* Set interface flags */
		return dev_change_flags(dev, ifr->ifr_flags);
4966

E
Eric Dumazet 已提交
4967 4968 4969
	case SIOCSIFMETRIC:	/* Set the metric on the interface
				   (currently unused) */
		return -EOPNOTSUPP;
4970

E
Eric Dumazet 已提交
4971 4972
	case SIOCSIFMTU:	/* Set the MTU of a device */
		return dev_set_mtu(dev, ifr->ifr_mtu);
L
Linus Torvalds 已提交
4973

E
Eric Dumazet 已提交
4974 4975
	case SIOCSIFHWADDR:
		return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
L
Linus Torvalds 已提交
4976

E
Eric Dumazet 已提交
4977 4978 4979 4980 4981 4982 4983
	case SIOCSIFHWBROADCAST:
		if (ifr->ifr_hwaddr.sa_family != dev->type)
			return -EINVAL;
		memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
		       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
		return 0;
L
Linus Torvalds 已提交
4984

E
Eric Dumazet 已提交
4985 4986
	case SIOCSIFMAP:
		if (ops->ndo_set_config) {
L
Linus Torvalds 已提交
4987 4988
			if (!netif_device_present(dev))
				return -ENODEV;
E
Eric Dumazet 已提交
4989 4990 4991
			return ops->ndo_set_config(dev, &ifr->ifr_map);
		}
		return -EOPNOTSUPP;
L
Linus Torvalds 已提交
4992

E
Eric Dumazet 已提交
4993
	case SIOCADDMULTI:
4994
		if (!ops->ndo_set_rx_mode ||
E
Eric Dumazet 已提交
4995 4996 4997 4998
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
4999
		return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
E
Eric Dumazet 已提交
5000 5001

	case SIOCDELMULTI:
5002
		if (!ops->ndo_set_rx_mode ||
E
Eric Dumazet 已提交
5003 5004 5005 5006
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
5007
		return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
L
Linus Torvalds 已提交
5008

E
Eric Dumazet 已提交
5009 5010 5011 5012 5013
	case SIOCSIFTXQLEN:
		if (ifr->ifr_qlen < 0)
			return -EINVAL;
		dev->tx_queue_len = ifr->ifr_qlen;
		return 0;
L
Linus Torvalds 已提交
5014

E
Eric Dumazet 已提交
5015 5016 5017
	case SIOCSIFNAME:
		ifr->ifr_newname[IFNAMSIZ-1] = '\0';
		return dev_change_name(dev, ifr->ifr_newname);
L
Linus Torvalds 已提交
5018

5019 5020 5021 5022 5023 5024
	case SIOCSHWTSTAMP:
		err = net_hwtstamp_validate(ifr);
		if (err)
			return err;
		/* fall through */

E
Eric Dumazet 已提交
5025 5026 5027 5028 5029 5030 5031 5032 5033 5034 5035 5036 5037 5038 5039 5040 5041 5042 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052
	/*
	 *	Unknown or private ioctl
	 */
	default:
		if ((cmd >= SIOCDEVPRIVATE &&
		    cmd <= SIOCDEVPRIVATE + 15) ||
		    cmd == SIOCBONDENSLAVE ||
		    cmd == SIOCBONDRELEASE ||
		    cmd == SIOCBONDSETHWADDR ||
		    cmd == SIOCBONDSLAVEINFOQUERY ||
		    cmd == SIOCBONDINFOQUERY ||
		    cmd == SIOCBONDCHANGEACTIVE ||
		    cmd == SIOCGMIIPHY ||
		    cmd == SIOCGMIIREG ||
		    cmd == SIOCSMIIREG ||
		    cmd == SIOCBRADDIF ||
		    cmd == SIOCBRDELIF ||
		    cmd == SIOCSHWTSTAMP ||
		    cmd == SIOCWANDEV) {
			err = -EOPNOTSUPP;
			if (ops->ndo_do_ioctl) {
				if (netif_device_present(dev))
					err = ops->ndo_do_ioctl(dev, ifr, cmd);
				else
					err = -ENODEV;
			}
		} else
			err = -EINVAL;
L
Linus Torvalds 已提交
5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063 5064

	}
	return err;
}

/*
 *	This function handles all "interface"-type I/O control requests. The actual
 *	'doing' part of this is dev_ifsioc above.
 */

/**
 *	dev_ioctl	-	network device ioctl
5065
 *	@net: the applicable net namespace
L
Linus Torvalds 已提交
5066 5067 5068 5069 5070 5071 5072 5073 5074
 *	@cmd: command to issue
 *	@arg: pointer to a struct ifreq in user space
 *
 *	Issue ioctl functions to devices. This is normally called by the
 *	user space syscall interfaces but can sometimes be useful for
 *	other purposes. The return value is the return from the syscall if
 *	positive or a negative errno code on error.
 */

5075
int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
L
Linus Torvalds 已提交
5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086
{
	struct ifreq ifr;
	int ret;
	char *colon;

	/* One special case: SIOCGIFCONF takes ifconf argument
	   and requires shared lock, because it sleeps writing
	   to user space.
	 */

	if (cmd == SIOCGIFCONF) {
5087
		rtnl_lock();
5088
		ret = dev_ifconf(net, (char __user *) arg);
5089
		rtnl_unlock();
L
Linus Torvalds 已提交
5090 5091 5092
		return ret;
	}
	if (cmd == SIOCGIFNAME)
5093
		return dev_ifname(net, (struct ifreq __user *)arg);
L
Linus Torvalds 已提交
5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	ifr.ifr_name[IFNAMSIZ-1] = 0;

	colon = strchr(ifr.ifr_name, ':');
	if (colon)
		*colon = 0;

	/*
	 *	See which interface the caller is talking about.
	 */

	switch (cmd) {
E
Eric Dumazet 已提交
5109 5110 5111 5112 5113 5114 5115 5116 5117 5118 5119 5120 5121 5122 5123
	/*
	 *	These ioctl calls:
	 *	- can be done by all.
	 *	- atomic and do not require locking.
	 *	- return a value
	 */
	case SIOCGIFFLAGS:
	case SIOCGIFMETRIC:
	case SIOCGIFMTU:
	case SIOCGIFHWADDR:
	case SIOCGIFSLAVE:
	case SIOCGIFMAP:
	case SIOCGIFINDEX:
	case SIOCGIFTXQLEN:
		dev_load(net, ifr.ifr_name);
E
Eric Dumazet 已提交
5124
		rcu_read_lock();
E
Eric Dumazet 已提交
5125
		ret = dev_ifsioc_locked(net, &ifr, cmd);
E
Eric Dumazet 已提交
5126
		rcu_read_unlock();
E
Eric Dumazet 已提交
5127 5128 5129 5130 5131 5132 5133 5134
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;
L
Linus Torvalds 已提交
5135

E
Eric Dumazet 已提交
5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148
	case SIOCETHTOOL:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ethtool(net, &ifr);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;
L
Linus Torvalds 已提交
5149

E
Eric Dumazet 已提交
5150 5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170 5171 5172
	/*
	 *	These ioctl calls:
	 *	- require superuser power.
	 *	- require strict serialization.
	 *	- return a value
	 */
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSIFNAME:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;
L
Linus Torvalds 已提交
5173

E
Eric Dumazet 已提交
5174 5175 5176 5177 5178 5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215
	/*
	 *	These ioctl calls:
	 *	- require superuser power.
	 *	- require strict serialization.
	 *	- do not return a value
	 */
	case SIOCSIFFLAGS:
	case SIOCSIFMETRIC:
	case SIOCSIFMTU:
	case SIOCSIFMAP:
	case SIOCSIFHWADDR:
	case SIOCSIFSLAVE:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFHWBROADCAST:
	case SIOCSIFTXQLEN:
	case SIOCSMIIREG:
	case SIOCBONDENSLAVE:
	case SIOCBONDRELEASE:
	case SIOCBONDSETHWADDR:
	case SIOCBONDCHANGEACTIVE:
	case SIOCBRADDIF:
	case SIOCBRDELIF:
	case SIOCSHWTSTAMP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* fall through */
	case SIOCBONDSLAVEINFOQUERY:
	case SIOCBONDINFOQUERY:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		return ret;

	case SIOCGIFMEM:
		/* Get the per device memory space. We can add this but
		 * currently do not support it */
	case SIOCSIFMEM:
		/* Set the per device memory buffer space.
		 * Not applicable in our case */
	case SIOCSIFLINK:
5216
		return -ENOTTY;
E
Eric Dumazet 已提交
5217 5218 5219 5220 5221 5222 5223 5224

	/*
	 *	Unknown or private ioctl.
	 */
	default:
		if (cmd == SIOCWANDEV ||
		    (cmd >= SIOCDEVPRIVATE &&
		     cmd <= SIOCDEVPRIVATE + 15)) {
5225
			dev_load(net, ifr.ifr_name);
L
Linus Torvalds 已提交
5226
			rtnl_lock();
5227
			ret = dev_ifsioc(net, &ifr, cmd);
L
Linus Torvalds 已提交
5228
			rtnl_unlock();
E
Eric Dumazet 已提交
5229 5230 5231
			if (!ret && copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
				ret = -EFAULT;
L
Linus Torvalds 已提交
5232
			return ret;
E
Eric Dumazet 已提交
5233 5234 5235 5236
		}
		/* Take care of Wireless Extensions */
		if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
			return wext_handle_ioctl(net, &ifr, cmd, arg);
5237
		return -ENOTTY;
L
Linus Torvalds 已提交
5238 5239 5240 5241 5242 5243
	}
}


/**
 *	dev_new_index	-	allocate an ifindex
5244
 *	@net: the applicable net namespace
L
Linus Torvalds 已提交
5245 5246 5247 5248 5249
 *
 *	Returns a suitable unique value for a new device interface
 *	number.  The caller must hold the rtnl semaphore or the
 *	dev_base_lock to be sure it remains unique.
 */
5250
static int dev_new_index(struct net *net)
L
Linus Torvalds 已提交
5251
{
5252
	int ifindex = net->ifindex;
L
Linus Torvalds 已提交
5253 5254 5255
	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
5256
		if (!__dev_get_by_index(net, ifindex))
5257
			return net->ifindex = ifindex;
L
Linus Torvalds 已提交
5258 5259 5260 5261
	}
}

/* Delayed registration/unregistration */
5262
static LIST_HEAD(net_todo_list);
L
Linus Torvalds 已提交
5263

5264
static void net_set_todo(struct net_device *dev)
L
Linus Torvalds 已提交
5265 5266 5267 5268
{
	list_add_tail(&dev->todo_list, &net_todo_list);
}

5269
static void rollback_registered_many(struct list_head *head)
5270
{
5271
	struct net_device *dev, *tmp;
5272

5273 5274 5275
	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

5276
	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5277
		/* Some devices call without registering
5278 5279
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
5280 5281
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
5282 5283
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);
5284

5285
			WARN_ON(1);
5286 5287
			list_del(&dev->unreg_list);
			continue;
5288
		}
5289
		dev->dismantle = true;
5290
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
5291
	}
5292

5293 5294
	/* If device is running, close it first. */
	dev_close_many(head);
5295

5296
	list_for_each_entry(dev, head, unreg_list) {
5297 5298
		/* And unlink it from device chain. */
		unlist_netdevice(dev);
5299

5300 5301
		dev->reg_state = NETREG_UNREGISTERING;
	}
5302 5303 5304

	synchronize_net();

5305 5306 5307
	list_for_each_entry(dev, head, unreg_list) {
		/* Shutdown queueing discipline. */
		dev_shutdown(dev);
5308 5309


5310 5311 5312 5313
		/* Notify protocols, that we are about to destroy
		   this device. They should clean all the things.
		*/
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5314

5315 5316 5317 5318
		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);

5319 5320 5321
		/*
		 *	Flush the unicast and multicast chains
		 */
5322
		dev_uc_flush(dev);
5323
		dev_mc_flush(dev);
5324

5325 5326
		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);
5327

5328 5329
		/* Notifier chain MUST detach us from master device. */
		WARN_ON(dev->master);
5330

5331 5332 5333
		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
	}
5334

5335
	synchronize_net();
5336

5337
	list_for_each_entry(dev, head, unreg_list)
5338 5339 5340 5341 5342 5343 5344 5345 5346
		dev_put(dev);
}

static void rollback_registered(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
E
Eric Dumazet 已提交
5347
	list_del(&single);
5348 5349
}

5350 5351
static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
5352
{
5353 5354 5355
	/* Fix illegal checksum combinations */
	if ((features & NETIF_F_HW_CSUM) &&
	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5356
		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
5357 5358 5359
		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

5360 5361 5362
	/* Fix illegal SG+CSUM combinations. */
	if ((features & NETIF_F_SG) &&
	    !(features & NETIF_F_ALL_CSUM)) {
5363 5364
		netdev_dbg(dev,
			"Dropping NETIF_F_SG since no checksum feature.\n");
5365 5366 5367 5368
		features &= ~NETIF_F_SG;
	}

	/* TSO requires that SG is present as well. */
5369
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
5370
		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
5371
		features &= ~NETIF_F_ALL_TSO;
5372 5373
	}

5374 5375 5376 5377
	/* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

5378 5379
	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5380
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
5381 5382 5383
		features &= ~NETIF_F_GSO;
	}

5384
	/* UFO needs SG and checksumming */
5385
	if (features & NETIF_F_UFO) {
5386 5387 5388 5389
		/* maybe split UFO into V4 and V6? */
		if (!((features & NETIF_F_GEN_CSUM) ||
		    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
			    == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5390
			netdev_dbg(dev,
5391
				"Dropping NETIF_F_UFO since no checksum offload features.\n");
5392 5393 5394 5395
			features &= ~NETIF_F_UFO;
		}

		if (!(features & NETIF_F_SG)) {
5396
			netdev_dbg(dev,
5397
				"Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
5398 5399 5400 5401 5402 5403 5404
			features &= ~NETIF_F_UFO;
		}
	}

	return features;
}

5405
int __netdev_update_features(struct net_device *dev)
5406
{
5407
	netdev_features_t features;
5408 5409
	int err = 0;

5410 5411
	ASSERT_RTNL();

5412 5413 5414 5415 5416 5417 5418 5419 5420
	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	if (dev->features == features)
5421
		return 0;
5422

5423 5424
	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		&dev->features, &features);
5425 5426 5427 5428

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);

5429
	if (unlikely(err < 0)) {
5430
		netdev_err(dev,
5431 5432
			"set_features() failed (%d); wanted %pNF, left %pNF\n",
			err, &features, &dev->features);
5433 5434 5435 5436 5437 5438 5439 5440 5441
		return -1;
	}

	if (!err)
		dev->features = features;

	return 1;
}

5442 5443 5444 5445 5446 5447 5448 5449
/**
 *	netdev_update_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications if it
 *	has changed. Should be called after driver or hardware dependent
 *	conditions might have changed that influence the features.
 */
5450 5451 5452 5453
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
5454 5455 5456
}
EXPORT_SYMBOL(netdev_update_features);

5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472 5473
/**
 *	netdev_change_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications even
 *	if they have not changed. Should be called instead of
 *	netdev_update_features() if also dev->vlan_features might
 *	have changed to allow the changes to be propagated to stacked
 *	VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);

5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500
/**
 *	netif_stacked_transfer_operstate -	transfer operstate
 *	@rootdev: the root or lower level device to transfer state from
 *	@dev: the device to transfer operstate to
 *
 *	Transfer operational state from root to device. This is normally
 *	called when a stacking relationship exists between the root
 *	device and the device(a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
					struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (netif_carrier_ok(rootdev)) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	}
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);

T
Tom Herbert 已提交
5501
#ifdef CONFIG_RPS
5502 5503 5504
static int netif_alloc_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;
T
Tom Herbert 已提交
5505
	struct netdev_rx_queue *rx;
5506

T
Tom Herbert 已提交
5507
	BUG_ON(count < 1);
5508

T
Tom Herbert 已提交
5509 5510
	rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
	if (!rx) {
5511
		pr_err("netdev: Unable to allocate %u rx queues\n", count);
T
Tom Herbert 已提交
5512
		return -ENOMEM;
5513
	}
T
Tom Herbert 已提交
5514 5515 5516
	dev->_rx = rx;

	for (i = 0; i < count; i++)
T
Tom Herbert 已提交
5517
		rx[i].dev = dev;
5518 5519
	return 0;
}
T
Tom Herbert 已提交
5520
#endif
5521

C
Changli Gao 已提交
5522 5523 5524 5525 5526 5527 5528
static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{
	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
	queue->xmit_lock_owner = -1;
5529
	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
C
Changli Gao 已提交
5530
	queue->dev = dev;
T
Tom Herbert 已提交
5531 5532 5533
#ifdef CONFIG_BQL
	dql_init(&queue->dql, HZ);
#endif
C
Changli Gao 已提交
5534 5535
}

5536 5537 5538 5539 5540 5541 5542 5543 5544
static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;

	BUG_ON(count < 1);

	tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
	if (!tx) {
5545
		pr_err("netdev: Unable to allocate %u tx queues\n", count);
5546 5547 5548
		return -ENOMEM;
	}
	dev->_tx = tx;
T
Tom Herbert 已提交
5549

5550 5551
	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);
C
Changli Gao 已提交
5552 5553

	return 0;
5554 5555
}

L
Linus Torvalds 已提交
5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575
/**
 *	register_netdevice	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	register_netdev() instead of this.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */

int register_netdevice(struct net_device *dev)
{
	int ret;
5576
	struct net *net = dev_net(dev);
L
Linus Torvalds 已提交
5577 5578 5579 5580

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

5581 5582
	might_sleep();

L
Linus Torvalds 已提交
5583 5584
	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
5585
	BUG_ON(!net);
L
Linus Torvalds 已提交
5586

5587
	spin_lock_init(&dev->addr_list_lock);
5588
	netdev_set_addr_lockdep_class(dev);
L
Linus Torvalds 已提交
5589 5590 5591

	dev->iflink = -1;

5592 5593 5594 5595
	ret = dev_get_valid_name(dev, dev->name);
	if (ret < 0)
		goto out;

L
Linus Torvalds 已提交
5596
	/* Init, if this function is available */
5597 5598
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
L
Linus Torvalds 已提交
5599 5600 5601
		if (ret) {
			if (ret > 0)
				ret = -EIO;
5602
			goto out;
L
Linus Torvalds 已提交
5603 5604
		}
	}
5605

5606 5607 5608 5609 5610 5611
	ret = -EBUSY;
	if (!dev->ifindex)
		dev->ifindex = dev_new_index(net);
	else if (__dev_get_by_index(net, dev->ifindex))
		goto err_uninit;

L
Linus Torvalds 已提交
5612 5613 5614
	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;

5615 5616 5617 5618
	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
	dev->hw_features |= NETIF_F_SOFT_FEATURES;
5619 5620
	dev->features |= NETIF_F_SOFT_FEATURES;
	dev->wanted_features = dev->features & dev->hw_features;
L
Linus Torvalds 已提交
5621

5622
	/* Turn on no cache copy if HW is doing checksum */
5623 5624 5625 5626 5627 5628
	if (!(dev->flags & IFF_LOOPBACK)) {
		dev->hw_features |= NETIF_F_NOCACHE_COPY;
		if (dev->features & NETIF_F_ALL_CSUM) {
			dev->wanted_features |= NETIF_F_NOCACHE_COPY;
			dev->features |= NETIF_F_NOCACHE_COPY;
		}
5629 5630
	}

5631
	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
5632
	 */
5633
	dev->vlan_features |= NETIF_F_HIGHDMA;
5634

5635 5636 5637 5638 5639
	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

5640
	ret = netdev_register_kobject(dev);
5641
	if (ret)
5642
		goto err_uninit;
5643 5644
	dev->reg_state = NETREG_REGISTERED;

5645
	__netdev_update_features(dev);
5646

L
Linus Torvalds 已提交
5647 5648 5649 5650 5651 5652 5653
	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

5654 5655
	linkwatch_init_dev(dev);

L
Linus Torvalds 已提交
5656 5657
	dev_init_scheduler(dev);
	dev_hold(dev);
5658
	list_netdevice(dev);
5659
	add_device_randomness(dev->dev_addr, dev->addr_len);
L
Linus Torvalds 已提交
5660 5661

	/* Notify protocols, that a new device appeared. */
5662
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
5663
	ret = notifier_to_errno(ret);
5664 5665 5666 5667
	if (ret) {
		rollback_registered(dev);
		dev->reg_state = NETREG_UNREGISTERED;
	}
5668 5669 5670 5671
	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
5672 5673 5674
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
L
Linus Torvalds 已提交
5675 5676 5677

out:
	return ret;
5678 5679

err_uninit:
5680 5681
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
5682
	goto out;
L
Linus Torvalds 已提交
5683
}
E
Eric Dumazet 已提交
5684
EXPORT_SYMBOL(register_netdevice);
L
Linus Torvalds 已提交
5685

5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716
/**
 *	init_dummy_netdev	- init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	This takes a network device structure and initialize the minimum
 *	amount of fields so it can be used to schedule NAPI polls without
 *	registering a full blown interface. This is to be used by drivers
 *	that need to tie several hardware interfaces to a single NAPI
 *	poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * are they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

E
Eric Dumazet 已提交
5717 5718 5719 5720 5721
	/* Note : We dont allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' dont need to change
	 * its refcount.
	 */

5722 5723 5724 5725 5726
	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);


L
Linus Torvalds 已提交
5727 5728 5729 5730 5731 5732 5733 5734 5735
/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
5736
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
L
Linus Torvalds 已提交
5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750
 *	and expands the device name if you passed a format string to
 *	alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);

E
Eric Dumazet 已提交
5751 5752 5753 5754 5755 5756 5757 5758 5759 5760
int netdev_refcnt_read(const struct net_device *dev)
{
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);

5761
/**
L
Linus Torvalds 已提交
5762
 * netdev_wait_allrefs - wait until all references are gone.
5763
 * @dev: target net_device
L
Linus Torvalds 已提交
5764 5765 5766 5767 5768 5769 5770
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
5771
 * call dev_put.
L
Linus Torvalds 已提交
5772 5773 5774 5775
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;
E
Eric Dumazet 已提交
5776
	int refcnt;
L
Linus Torvalds 已提交
5777

5778 5779
	linkwatch_forget_dev(dev);

L
Linus Torvalds 已提交
5780
	rebroadcast_time = warning_time = jiffies;
E
Eric Dumazet 已提交
5781 5782 5783
	refcnt = netdev_refcnt_read(dev);

	while (refcnt != 0) {
L
Linus Torvalds 已提交
5784
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
5785
			rtnl_lock();
L
Linus Torvalds 已提交
5786 5787

			/* Rebroadcast unregister notification */
5788
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5789 5790

			__rtnl_unlock();
5791
			rcu_barrier();
5792 5793
			rtnl_lock();

5794
			call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
L
Linus Torvalds 已提交
5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805
			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

5806
			__rtnl_unlock();
L
Linus Torvalds 已提交
5807 5808 5809 5810 5811 5812

			rebroadcast_time = jiffies;
		}

		msleep(250);

E
Eric Dumazet 已提交
5813 5814
		refcnt = netdev_refcnt_read(dev);

L
Linus Torvalds 已提交
5815
		if (time_after(jiffies, warning_time + 10 * HZ)) {
5816 5817
			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
				 dev->name, refcnt);
L
Linus Torvalds 已提交
5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836
			warning_time = jiffies;
		}
	}
}

/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *      ...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
H
Herbert Xu 已提交
5837
 * We are invoked by rtnl_unlock().
L
Linus Torvalds 已提交
5838
 * This allows us to deal with problems:
5839
 * 1) We can delete sysfs objects which invoke hotplug
L
Linus Torvalds 已提交
5840 5841 5842
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
H
Herbert Xu 已提交
5843 5844 5845
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
L
Linus Torvalds 已提交
5846 5847 5848
 */
void netdev_run_todo(void)
{
5849
	struct list_head list;
L
Linus Torvalds 已提交
5850 5851

	/* Snapshot list, allow later requests */
5852
	list_replace_init(&net_todo_list, &list);
H
Herbert Xu 已提交
5853 5854

	__rtnl_unlock();
5855

5856 5857

	/* Wait for rcu callbacks to finish before next phase */
5858 5859 5860
	if (!list_empty(&list))
		rcu_barrier();

L
Linus Torvalds 已提交
5861 5862
	while (!list_empty(&list)) {
		struct net_device *dev
5863
			= list_first_entry(&list, struct net_device, todo_list);
L
Linus Torvalds 已提交
5864 5865
		list_del(&dev->todo_list);

5866
		rtnl_lock();
5867
		call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
5868
		__rtnl_unlock();
5869

5870
		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5871
			pr_err("network todo '%s' but state %d\n",
5872 5873 5874 5875
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}
L
Linus Torvalds 已提交
5876

5877
		dev->reg_state = NETREG_UNREGISTERED;
L
Linus Torvalds 已提交
5878

5879
		on_each_cpu(flush_backlog, dev, 1);
5880

5881
		netdev_wait_allrefs(dev);
L
Linus Torvalds 已提交
5882

5883
		/* paranoia */
E
Eric Dumazet 已提交
5884
		BUG_ON(netdev_refcnt_read(dev));
5885 5886
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
5887
		WARN_ON(dev->dn_ptr);
L
Linus Torvalds 已提交
5888

5889 5890
		if (dev->destructor)
			dev->destructor(dev);
5891 5892 5893

		/* Free network device */
		kobject_put(&dev->dev.kobj);
L
Linus Torvalds 已提交
5894 5895 5896
	}
}

5897 5898 5899
/* Convert net_device_stats to rtnl_link_stats64.  They have the same
 * fields in the same order, with only the type differing.
 */
5900 5901
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
5902 5903
{
#if BITS_PER_LONG == 64
5904 5905
	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*stats64));
5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916
#else
	size_t i, n = sizeof(*stats64) / sizeof(u64);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
		     sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
#endif
}
5917
EXPORT_SYMBOL(netdev_stats_to_stats64);
5918

5919 5920 5921
/**
 *	dev_get_stats	- get network device statistics
 *	@dev: device to get statistics from
5922
 *	@storage: place to store stats
5923
 *
5924 5925 5926 5927
 *	Get network statistics from device. Return @storage.
 *	The device driver may provide its own method by setting
 *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 *	otherwise the internal statistics structure is used.
5928
 */
5929 5930
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
5931
{
5932 5933
	const struct net_device_ops *ops = dev->netdev_ops;

5934 5935
	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
5936 5937
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
5938
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
5939 5940
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
5941
	}
5942
	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
5943
	return storage;
R
Rusty Russell 已提交
5944
}
5945
EXPORT_SYMBOL(dev_get_stats);
R
Rusty Russell 已提交
5946

5947
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
5948
{
5949
	struct netdev_queue *queue = dev_ingress_queue(dev);
5950

5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962
#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	queue->qdisc = &noop_qdisc;
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
5963 5964
}

L
Linus Torvalds 已提交
5965
/**
T
Tom Herbert 已提交
5966
 *	alloc_netdev_mqs - allocate network device
L
Linus Torvalds 已提交
5967 5968 5969
 *	@sizeof_priv:	size of private data to allocate space for
 *	@name:		device name format string
 *	@setup:		callback to initialize device
T
Tom Herbert 已提交
5970 5971
 *	@txqs:		the number of TX subqueues to allocate
 *	@rxqs:		the number of RX subqueues to allocate
L
Linus Torvalds 已提交
5972 5973
 *
 *	Allocates a struct net_device with private data area for driver use
5974
 *	and performs basic initialization.  Also allocates subquue structs
T
Tom Herbert 已提交
5975
 *	for each queue on the device.
L
Linus Torvalds 已提交
5976
 */
T
Tom Herbert 已提交
5977 5978 5979
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
L
Linus Torvalds 已提交
5980 5981
{
	struct net_device *dev;
5982
	size_t alloc_size;
5983
	struct net_device *p;
L
Linus Torvalds 已提交
5984

5985 5986
	BUG_ON(strlen(name) >= sizeof(dev->name));

T
Tom Herbert 已提交
5987
	if (txqs < 1) {
5988
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
5989 5990 5991
		return NULL;
	}

T
Tom Herbert 已提交
5992 5993
#ifdef CONFIG_RPS
	if (rxqs < 1) {
5994
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
T
Tom Herbert 已提交
5995 5996 5997 5998
		return NULL;
	}
#endif

5999
	alloc_size = sizeof(struct net_device);
6000 6001
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
6002
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
6003 6004 6005
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
6006
	alloc_size += NETDEV_ALIGN - 1;
L
Linus Torvalds 已提交
6007

6008
	p = kzalloc(alloc_size, GFP_KERNEL);
L
Linus Torvalds 已提交
6009
	if (!p) {
6010
		pr_err("alloc_netdev: Unable to allocate device\n");
L
Linus Torvalds 已提交
6011 6012 6013
		return NULL;
	}

6014
	dev = PTR_ALIGN(p, NETDEV_ALIGN);
L
Linus Torvalds 已提交
6015
	dev->padded = (char *)dev - (char *)p;
6016

E
Eric Dumazet 已提交
6017 6018
	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
6019
		goto free_p;
6020 6021

	if (dev_addr_init(dev))
E
Eric Dumazet 已提交
6022
		goto free_pcpu;
6023

6024
	dev_mc_init(dev);
6025
	dev_uc_init(dev);
J
Jiri Pirko 已提交
6026

6027
	dev_net_set(dev, &init_net);
L
Linus Torvalds 已提交
6028

6029
	dev->gso_max_size = GSO_MAX_SIZE;
6030
	dev->gso_max_segs = GSO_MAX_SEGS;
6031 6032 6033 6034 6035 6036 6037

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);

T
Tom Herbert 已提交
6038 6039
	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
6040
	if (netif_alloc_netdev_queues(dev))
6041
		goto free_all;
6042

E
Eric Dumazet 已提交
6043
#ifdef CONFIG_RPS
T
Tom Herbert 已提交
6044 6045
	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
T
Tom Herbert 已提交
6046
	if (netif_alloc_rx_queues(dev))
6047
		goto free_all;
E
Eric Dumazet 已提交
6048
#endif
T
Tom Herbert 已提交
6049

L
Linus Torvalds 已提交
6050
	strcpy(dev->name, name);
6051
	dev->group = INIT_NETDEV_GROUP;
L
Linus Torvalds 已提交
6052
	return dev;
6053

6054 6055 6056 6057
free_all:
	free_netdev(dev);
	return NULL;

E
Eric Dumazet 已提交
6058 6059
free_pcpu:
	free_percpu(dev->pcpu_refcnt);
6060
	kfree(dev->_tx);
T
Tom Herbert 已提交
6061 6062 6063 6064
#ifdef CONFIG_RPS
	kfree(dev->_rx);
#endif

6065 6066 6067
free_p:
	kfree(p);
	return NULL;
L
Linus Torvalds 已提交
6068
}
T
Tom Herbert 已提交
6069
EXPORT_SYMBOL(alloc_netdev_mqs);
L
Linus Torvalds 已提交
6070 6071 6072 6073 6074

/**
 *	free_netdev - free network device
 *	@dev: device
 *
6075 6076
 *	This function does the last stage of destroying an allocated device
 * 	interface. The reference to the device object is released.
L
Linus Torvalds 已提交
6077 6078 6079 6080
 *	If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
6081 6082
	struct napi_struct *p, *n;

6083 6084
	release_net(dev_net(dev));

6085
	kfree(dev->_tx);
T
Tom Herbert 已提交
6086 6087 6088
#ifdef CONFIG_RPS
	kfree(dev->_rx);
#endif
6089

6090
	kfree(rcu_dereference_protected(dev->ingress_queue, 1));
6091

6092 6093 6094
	/* Flush device addresses */
	dev_addr_flush(dev);

6095 6096 6097
	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

E
Eric Dumazet 已提交
6098 6099 6100
	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

S
Stephen Hemminger 已提交
6101
	/*  Compatibility with error handling in drivers */
L
Linus Torvalds 已提交
6102 6103 6104 6105 6106 6107 6108 6109
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

6110 6111
	/* will free via device release */
	put_device(&dev->dev);
L
Linus Torvalds 已提交
6112
}
E
Eric Dumazet 已提交
6113
EXPORT_SYMBOL(free_netdev);
6114

6115 6116 6117 6118 6119 6120
/**
 *	synchronize_net -  Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
6121
void synchronize_net(void)
L
Linus Torvalds 已提交
6122 6123
{
	might_sleep();
6124 6125 6126 6127
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
L
Linus Torvalds 已提交
6128
}
E
Eric Dumazet 已提交
6129
EXPORT_SYMBOL(synchronize_net);
L
Linus Torvalds 已提交
6130 6131

/**
6132
 *	unregister_netdevice_queue - remove device from the kernel
L
Linus Torvalds 已提交
6133
 *	@dev: device
6134
 *	@head: list
6135
 *
L
Linus Torvalds 已提交
6136
 *	This function shuts down a device interface and removes it
6137
 *	from the kernel tables.
6138
 *	If head not NULL, device is queued to be unregistered later.
L
Linus Torvalds 已提交
6139 6140 6141 6142 6143
 *
 *	Callers must hold the rtnl semaphore.  You may want
 *	unregister_netdev() instead of this.
 */

6144
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
L
Linus Torvalds 已提交
6145
{
6146 6147
	ASSERT_RTNL();

6148
	if (head) {
6149
		list_move_tail(&dev->unreg_list, head);
6150 6151 6152 6153 6154
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
L
Linus Torvalds 已提交
6155
}
6156
EXPORT_SYMBOL(unregister_netdevice_queue);
L
Linus Torvalds 已提交
6157

6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169 6170 6171
/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
	}
}
6172
EXPORT_SYMBOL(unregister_netdevice_many);
6173

L
Linus Torvalds 已提交
6174 6175 6176 6177 6178
/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
6179
 *	from the kernel tables.
L
Linus Torvalds 已提交
6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);

6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224
/**
 *	dev_change_net_namespace - move device to different nethost namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a netagive errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */

int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registrered */
	err = -EINVAL;
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing todo */
	err = 0;
6225
	if (net_eq(dev_net(dev), net))
6226 6227 6228 6229 6230 6231
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
6232
	if (__dev_get_by_name(net, dev->name)) {
6233 6234 6235
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
6236
		if (dev_get_valid_name(dev, pat) < 0)
6237 6238 6239 6240 6241 6242 6243 6244
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
6245
	dev_close(dev);
6246 6247 6248 6249 6250 6251 6252 6253 6254 6255 6256 6257

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.
6258 6259 6260 6261

	   Note that dev->reg_state stays at NETREG_REGISTERED.
	   This is wanted because this way 8021q and macvlan know
	   the device is just moving and can keep their slaves up.
6262 6263
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6264 6265
	rcu_barrier();
	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6266
	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
6267 6268 6269 6270

	/*
	 *	Flush the unicast and multicast chains
	 */
6271
	dev_uc_flush(dev);
6272
	dev_mc_flush(dev);
6273 6274

	/* Actually switch the network namespace */
6275
	dev_net_set(dev, net);
6276 6277 6278 6279 6280 6281 6282 6283 6284

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

6285
	/* Fixup kobjects */
6286
	err = device_rename(&dev->dev, dev->name);
6287
	WARN_ON(err);
6288 6289 6290 6291 6292 6293 6294

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

6295 6296 6297 6298 6299 6300
	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

6301 6302 6303 6304 6305
	synchronize_net();
	err = 0;
out:
	return err;
}
6306
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
6307

L
Linus Torvalds 已提交
6308 6309 6310 6311 6312 6313 6314 6315 6316
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

6317
	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
L
Linus Torvalds 已提交
6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
6334 6335 6336 6337 6338 6339
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
6340 6341 6342 6343 6344
	/* Append NAPI poll list from offline CPU. */
	if (!list_empty(&oldsd->poll_list)) {
		list_splice_init(&oldsd->poll_list, &sd->poll_list);
		raise_softirq_irqoff(NET_RX_SOFTIRQ);
	}
L
Linus Torvalds 已提交
6345 6346 6347 6348 6349

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
6350
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
L
Linus Torvalds 已提交
6351
		netif_rx(skb);
6352
		input_queue_head_incr(oldsd);
T
Tom Herbert 已提交
6353
	}
6354
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
6355
		netif_rx(skb);
6356 6357
		input_queue_head_incr(oldsd);
	}
L
Linus Torvalds 已提交
6358 6359 6360 6361 6362

	return NOTIFY_OK;
}


6363
/**
6364 6365 6366 6367
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
6368 6369
 *
 *	Computes a new feature set after adding a device with feature set
6370 6371
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
6372
 */
6373 6374
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
6375
{
6376 6377 6378
	if (mask & NETIF_F_GEN_CSUM)
		mask |= NETIF_F_ALL_CSUM;
	mask |= NETIF_F_VLAN_CHALLENGED;
6379

6380 6381
	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;
6382

6383 6384 6385
	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_GEN_CSUM)
		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
6386 6387 6388

	return all;
}
6389
EXPORT_SYMBOL(netdev_increment_features);
6390

6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403
static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

6404
/* Initialize per network namespace state */
6405
static int __net_init netdev_init(struct net *net)
6406
{
6407 6408
	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);
6409

6410 6411 6412
	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;
6413

6414 6415 6416
	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;
6417 6418

	return 0;
6419 6420 6421 6422 6423

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
6424 6425
}

6426 6427 6428 6429 6430 6431
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
6432
const char *netdev_drivername(const struct net_device *dev)
6433
{
6434 6435
	const struct device_driver *driver;
	const struct device *parent;
6436
	const char *empty = "";
6437 6438 6439

	parent = dev->dev.parent;
	if (!parent)
6440
		return empty;
6441 6442 6443

	driver = parent->driver;
	if (driver && driver->name)
6444 6445
		return driver->name;
	return empty;
6446 6447
}

6448
int __netdev_printk(const char *level, const struct net_device *dev,
6449 6450 6451 6452 6453 6454 6455 6456 6457 6458 6459 6460 6461 6462
			   struct va_format *vaf)
{
	int r;

	if (dev && dev->dev.parent)
		r = dev_printk(level, dev->dev.parent, "%s: %pV",
			       netdev_name(dev), vaf);
	else if (dev)
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
	else
		r = printk("%s(NULL net_device): %pV", level, vaf);

	return r;
}
6463
EXPORT_SYMBOL(__netdev_printk);
6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509 6510

int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = __netdev_printk(level, dev, &vaf);
	va_end(args);

	return r;
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
int func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	int r;							\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	r = __netdev_printk(level, dev, &vaf);			\
	va_end(args);						\
								\
	return r;						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);

6511
static void __net_exit netdev_exit(struct net *net)
6512 6513 6514 6515 6516
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

6517
static struct pernet_operations __net_initdata netdev_net_ops = {
6518 6519 6520 6521
	.init = netdev_init,
	.exit = netdev_exit,
};

6522
static void __net_exit default_device_exit(struct net *net)
6523
{
6524
	struct net_device *dev, *aux;
6525
	/*
6526
	 * Push all migratable network devices back to the
6527 6528 6529
	 * initial network namespace
	 */
	rtnl_lock();
6530
	for_each_netdev_safe(net, dev, aux) {
6531
		int err;
6532
		char fb_name[IFNAMSIZ];
6533 6534 6535 6536 6537

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

6538 6539 6540
		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;
6541

L
Lucas De Marchi 已提交
6542
		/* Push remaining network devices to init_net */
6543 6544
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
6545
		if (err) {
6546 6547
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
6548
			BUG();
6549 6550 6551 6552 6553
		}
	}
	rtnl_unlock();
}

6554 6555 6556
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices most be removed from a network
6557
	 * namespace.  Do this in the reverse order of registration.
6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
E
Eric Dumazet 已提交
6575
	list_del(&dev_kill_list);
6576 6577 6578
	rtnl_unlock();
}

6579
static struct pernet_operations __net_initdata default_device_ops = {
6580
	.exit = default_device_exit,
6581
	.exit_batch = default_device_exit_batch,
6582 6583
};

L
Linus Torvalds 已提交
6584 6585 6586 6587 6588 6589 6590 6591 6592 6593 6594 6595 6596 6597 6598 6599 6600 6601 6602 6603
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *       This is called single threaded during boot, so no need
 *       to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

6604
	if (netdev_kobject_init())
L
Linus Torvalds 已提交
6605 6606 6607
		goto out;

	INIT_LIST_HEAD(&ptype_all);
6608
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
L
Linus Torvalds 已提交
6609 6610
		INIT_LIST_HEAD(&ptype_base[i]);

6611 6612
	if (register_pernet_subsys(&netdev_net_ops))
		goto out;
L
Linus Torvalds 已提交
6613 6614 6615 6616 6617

	/*
	 *	Initialise the packet receive queues.
	 */

6618
	for_each_possible_cpu(i) {
E
Eric Dumazet 已提交
6619
		struct softnet_data *sd = &per_cpu(softnet_data, i);
L
Linus Torvalds 已提交
6620

C
Changli Gao 已提交
6621
		memset(sd, 0, sizeof(*sd));
E
Eric Dumazet 已提交
6622
		skb_queue_head_init(&sd->input_pkt_queue);
6623
		skb_queue_head_init(&sd->process_queue);
E
Eric Dumazet 已提交
6624 6625
		sd->completion_queue = NULL;
		INIT_LIST_HEAD(&sd->poll_list);
6626 6627
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
E
Eric Dumazet 已提交
6628
#ifdef CONFIG_RPS
E
Eric Dumazet 已提交
6629 6630 6631 6632
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->csd.flags = 0;
		sd->cpu = i;
6633
#endif
T
Tom Herbert 已提交
6634

E
Eric Dumazet 已提交
6635 6636 6637 6638
		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		sd->backlog.gro_list = NULL;
		sd->backlog.gro_count = 0;
L
Linus Torvalds 已提交
6639 6640 6641 6642
	}

	dev_boot_phase = 0;

6643 6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655 6656 6657
	/* The loopback device is special if any other network devices
	 * is present in a network namespace the loopback device must
	 * be present. Since we now dynamically allocate and free the
	 * loopback device ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices.  Ensuring the loopback devices
	 * is the first device that appears and the last network device
	 * that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

6658 6659
	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
L
Linus Torvalds 已提交
6660 6661 6662 6663 6664 6665 6666 6667 6668 6669 6670

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

6671 6672
static int __init initialize_hashrnd(void)
{
T
Tom Herbert 已提交
6673
	get_random_bytes(&hashrnd, sizeof(hashrnd));
6674 6675 6676 6677 6678
	return 0;
}

late_initcall_sync(initialize_hashrnd);