dev.c 98.8 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9
/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
10
 * 		Authors:	Ross Biro
L
Linus Torvalds 已提交
11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *              			to 2 if register_netdev gets called
 *              			before net_dev_init & also removed a
 *              			few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	: 	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	: 	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki   :	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *              Pekka Riikonen  :	Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *              			indefinitely on dev->refcnt
 * 		J Hadi Salim	:	- Backlog queue sampling
 *				        - netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
78
#include <linux/capability.h>
L
Linus Torvalds 已提交
79 80 81 82
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
A
Arjan van de Ven 已提交
83
#include <linux/mutex.h>
L
Linus Torvalds 已提交
84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
P
Patrick McHardy 已提交
101
#include <linux/if_macvlan.h>
L
Linus Torvalds 已提交
102 103 104 105 106 107 108 109 110 111 112
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
113
#include <net/wext.h>
L
Linus Torvalds 已提交
114 115
#include <net/iw_handler.h>
#include <asm/current.h>
S
Steve Grubb 已提交
116
#include <linux/audit.h>
117
#include <linux/dmaengine.h>
118
#include <linux/err.h>
119
#include <linux/ctype.h>
120
#include <linux/if_arp.h>
L
Linus Torvalds 已提交
121 122 123 124 125 126 127 128 129 130 131 132

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *             sure which should go first, but I bet it won't make much
 *             difference if we are running VLANs.  The good news is that
 *             this protocol won't be in the list unless compiled in, so
S
Stephen Hemminger 已提交
133
 *             the average user (w/out VLANs) will not be adversely affected.
L
Linus Torvalds 已提交
134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150
 *             --BLG
 *
 *		0800	IP
 *		8100    802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

static DEFINE_SPINLOCK(ptype_lock);
151 152
static struct list_head ptype_base[16] __read_mostly;	/* 16 way hashed list */
static struct list_head ptype_all __read_mostly;	/* Taps */
L
Linus Torvalds 已提交
153

154
#ifdef CONFIG_NET_DMA
155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170
struct net_dma {
	struct dma_client client;
	spinlock_t lock;
	cpumask_t channel_mask;
	struct dma_chan *channels[NR_CPUS];
};

static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state);

static struct net_dma net_dma = {
	.client = {
		.event_callback = netdev_dma_event,
	},
};
171 172
#endif

L
Linus Torvalds 已提交
173
/*
174
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
L
Linus Torvalds 已提交
175 176 177 178 179
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
180
 * dev_base_head list, and hold dev_base_lock for writing when they do the
L
Linus Torvalds 已提交
181 182 183 184 185 186 187 188 189 190 191
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
192
LIST_HEAD(dev_base_head);
L
Linus Torvalds 已提交
193 194
DEFINE_RWLOCK(dev_base_lock);

195
EXPORT_SYMBOL(dev_base_head);
L
Linus Torvalds 已提交
196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216
EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS	8
static struct hlist_head dev_name_head[1<<NETDEV_HASHBITS];
static struct hlist_head dev_index_head[1<<NETDEV_HASHBITS];

static inline struct hlist_head *dev_name_hash(const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &dev_name_head[hash & ((1<<NETDEV_HASHBITS)-1)];
}

static inline struct hlist_head *dev_index_hash(int ifindex)
{
	return &dev_index_head[ifindex & ((1<<NETDEV_HASHBITS)-1)];
}

/*
 *	Our notifier list
 */

217
static RAW_NOTIFIER_HEAD(netdev_chain);
L
Linus Torvalds 已提交
218 219 220 221 222

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */
223
DEFINE_PER_CPU(struct softnet_data, softnet_data) = { NULL };
L
Linus Torvalds 已提交
224 225 226 227 228 229 230 231 232 233 234

#ifdef CONFIG_SYSFS
extern int netdev_sysfs_init(void);
extern int netdev_register_sysfs(struct net_device *);
extern void netdev_unregister_sysfs(struct net_device *);
#else
#define netdev_sysfs_init()	 	(0)
#define netdev_register_sysfs(dev)	(0)
#define	netdev_unregister_sysfs(dev)	do { } while(0)
#endif

235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * register_netdevice() inits dev->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
	 ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
	 "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
}
#endif
L
Linus Torvalds 已提交
302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
333
 *	This call does not sleep therefore it can not
L
Linus Torvalds 已提交
334 335 336 337 338 339 340 341 342
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
S
Stephen Hemminger 已提交
343
	if (pt->type == htons(ETH_P_ALL))
L
Linus Torvalds 已提交
344
		list_add_rcu(&pt->list, &ptype_all);
S
Stephen Hemminger 已提交
345
	else {
L
Linus Torvalds 已提交
346 347 348 349 350 351 352 353 354 355 356 357 358
		hash = ntohs(pt->type) & 15;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}

/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
359
 *	returns.
L
Linus Torvalds 已提交
360 361 362 363 364 365 366 367 368 369 370 371
 *
 *      The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

S
Stephen Hemminger 已提交
372
	if (pt->type == htons(ETH_P_ALL))
L
Linus Torvalds 已提交
373
		head = &ptype_all;
S
Stephen Hemminger 已提交
374
	else
L
Linus Torvalds 已提交
375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402
		head = &ptype_base[ntohs(pt->type) & 15];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);
403

L
Linus Torvalds 已提交
404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650
	synchronize_net();
}

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strcpy(s[i].name, name);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 * 	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
			dev->irq 	= s[i].map.irq;
			dev->base_addr 	= s[i].map.base_addr;
			dev->mem_start 	= s[i].map.mem_start;
			dev->mem_end 	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 * 	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name		- find a device by its name
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

651
	for_each_netdev(dev)
L
Linus Torvalds 已提交
652 653
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
654 655 656
			return dev;

	return NULL;
L
Linus Torvalds 已提交
657 658
}

659 660
EXPORT_SYMBOL(dev_getbyhwaddr);

661
struct net_device *__dev_getfirstbyhwtype(unsigned short type)
L
Linus Torvalds 已提交
662 663 664
{
	struct net_device *dev;

665
	ASSERT_RTNL();
666
	for_each_netdev(dev)
667
		if (dev->type == type)
668 669 670
			return dev;

	return NULL;
671 672 673 674 675 676 677 678 679 680 681 682
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(type);
	if (dev)
		dev_hold(dev);
L
Linus Torvalds 已提交
683 684 685 686 687 688 689 690 691 692 693 694
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
695
 *	is not found or a pointer to the device. The device returned has
L
Linus Torvalds 已提交
696 697 698 699 700 701
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mask)
{
702
	struct net_device *dev, *ret;
L
Linus Torvalds 已提交
703

704
	ret = NULL;
L
Linus Torvalds 已提交
705
	read_lock(&dev_base_lock);
706
	for_each_netdev(dev) {
L
Linus Torvalds 已提交
707 708
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
709
			ret = dev;
L
Linus Torvalds 已提交
710 711 712 713
			break;
		}
	}
	read_unlock(&dev_base_lock);
714
	return ret;
L
Linus Torvalds 已提交
715 716 717 718 719 720 721
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
722 723
 *	to allow sysfs to work.  We also disallow any kind of
 *	whitespace.
L
Linus Torvalds 已提交
724
 */
725
int dev_valid_name(const char *name)
L
Linus Torvalds 已提交
726
{
727 728
	if (*name == '\0')
		return 0;
729 730
	if (strlen(name) >= IFNAMSIZ)
		return 0;
731 732 733 734 735 736 737 738 739
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
L
Linus Torvalds 已提交
740 741 742 743 744 745 746 747
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
S
Stephen Hemminger 已提交
748 749 750 751 752 753
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
L
Linus Torvalds 已提交
754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	int i = 0;
	char buf[IFNAMSIZ];
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

780
		for_each_netdev(d) {
L
Linus Torvalds 已提交
781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/*  avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, sizeof(buf), name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, sizeof(buf), name, i);
	if (!__dev_get_by_name(buf)) {
		strlcpy(dev->name, buf, IFNAMSIZ);
		return i;
	}

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}


/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
820
	char oldname[IFNAMSIZ];
L
Linus Torvalds 已提交
821
	int err = 0;
822
	int ret;
L
Linus Torvalds 已提交
823 824 825 826 827 828 829 830 831

	ASSERT_RTNL();

	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

832 833
	memcpy(oldname, dev->name, IFNAMSIZ);

L
Linus Torvalds 已提交
834 835 836 837 838 839 840 841 842 843 844
	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

845
rollback:
846
	device_rename(&dev->dev, dev->name);
847 848

	write_lock_bh(&dev_base_lock);
849 850
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
851 852
	write_unlock_bh(&dev_base_lock);

853 854 855 856 857 858 859 860 861 862 863 864 865 866
	ret = raw_notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}
L
Linus Torvalds 已提交
867 868 869 870

	return err;
}

871
/**
S
Stephen Hemminger 已提交
872
 *	netdev_features_change - device changes features
873 874 875 876 877 878
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
879
	raw_notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
880 881 882
}
EXPORT_SYMBOL(netdev_features_change);

L
Linus Torvalds 已提交
883 884 885 886 887 888 889 890 891 892 893
/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
894
		raw_notifier_call_chain(&netdev_chain,
895
				NETDEV_CHANGE, dev);
L
Linus Torvalds 已提交
896 897 898 899 900 901 902 903 904 905 906 907 908 909 910
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

/**
 *	dev_load 	- load a network module
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(const char *name)
{
911
	struct net_device *dev;
L
Linus Torvalds 已提交
912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

static int default_rebuild_header(struct sk_buff *skb)
{
	printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
	       skb->dev ? skb->dev->name : "NULL!!!");
	kfree_skb(skb);
	return 1;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);
	if (dev->open) {
		ret = dev->open(dev);
		if (ret)
			clear_bit(__LINK_STATE_START, &dev->state);
	}

968
	/*
L
Linus Torvalds 已提交
969 970 971 972 973 974 975 976 977 978 979 980
	 *	If it went open OK then:
	 */

	if (!ret) {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
981
		dev_set_rx_mode(dev);
L
Linus Torvalds 已提交
982 983 984 985 986 987 988 989 990

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
991
		raw_notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
L
Linus Torvalds 已提交
992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013
	}
	return ret;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare to death, when device is still operating.
	 */
1014
	raw_notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
L
Linus Torvalds 已提交
1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028

	dev_deactivate(dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running(),
	 * and wait when poll really will happen. Actually, the best place
	 * for this is inside dev->stop() after device stopped its irq
	 * engine, but this requires more changes in devices. */

	smp_mb__after_clear_bit(); /* Commit netif_running(). */
	while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
		/* No hurry. */
1029
		msleep(1);
L
Linus Torvalds 已提交
1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050
	}

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 * Tell people we are down
	 */
1051
	raw_notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
L
Linus Torvalds 已提交
1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071

	return 0;
}


/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 * 	When registered all registration and up events are replayed
1072
 *	to the new notifier to allow device to have a race free
L
Linus Torvalds 已提交
1073 1074 1075 1076 1077 1078
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
1079
	struct net_device *last;
L
Linus Torvalds 已提交
1080 1081 1082
	int err;

	rtnl_lock();
1083
	err = raw_notifier_chain_register(&netdev_chain, nb);
1084 1085
	if (err)
		goto unlock;
L
Linus Torvalds 已提交
1086

1087 1088 1089 1090 1091 1092 1093 1094 1095 1096
	for_each_netdev(dev) {
		err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
		err = notifier_to_errno(err);
		if (err)
			goto rollback;

		if (!(dev->flags & IFF_UP))
			continue;

		nb->notifier_call(nb, NETDEV_UP, dev);
L
Linus Torvalds 已提交
1097
	}
1098 1099

unlock:
L
Linus Torvalds 已提交
1100 1101
	rtnl_unlock();
	return err;
1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115

rollback:
	last = dev;
	for_each_netdev(dev) {
		if (dev == last)
			break;

		if (dev->flags & IFF_UP) {
			nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
			nb->notifier_call(nb, NETDEV_DOWN, dev);
		}
		nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
	}
	goto unlock;
L
Linus Torvalds 已提交
1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129
}

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked into the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
1130 1131 1132
	int err;

	rtnl_lock();
1133
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
1134 1135
	rtnl_unlock();
	return err;
L
Linus Torvalds 已提交
1136 1137 1138 1139 1140 1141 1142 1143
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *      @val: value passed unmodified to notifier function
 *      @v:   pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
1144
 *	are as for raw_notifier_call_chain().
L
Linus Torvalds 已提交
1145 1146 1147 1148
 */

int call_netdevice_notifiers(unsigned long val, void *v)
{
1149
	return raw_notifier_call_chain(&netdev_chain, val, v);
L
Linus Torvalds 已提交
1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

1165
static inline void net_timestamp(struct sk_buff *skb)
L
Linus Torvalds 已提交
1166 1167
{
	if (atomic_read(&netstamp_needed))
1168
		__net_timestamp(skb);
1169 1170
	else
		skb->tstamp.tv64 = 0;
L
Linus Torvalds 已提交
1171 1172 1173 1174 1175 1176 1177
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

1178
static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
L
Linus Torvalds 已提交
1179 1180
{
	struct packet_type *ptype;
1181 1182

	net_timestamp(skb);
L
Linus Torvalds 已提交
1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
1200
			skb_reset_mac_header(skb2);
L
Linus Torvalds 已提交
1201

1202
			if (skb_network_header(skb2) < skb2->data ||
1203
			    skb2->network_header > skb2->tail) {
L
Linus Torvalds 已提交
1204 1205 1206 1207
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
1208
				skb_reset_network_header(skb2);
L
Linus Torvalds 已提交
1209 1210
			}

1211
			skb2->transport_header = skb2->network_header;
L
Linus Torvalds 已提交
1212
			skb2->pkt_type = PACKET_OUTGOING;
D
David S. Miller 已提交
1213
			ptype->func(skb2, skb->dev, ptype, skb->dev);
L
Linus Torvalds 已提交
1214 1215 1216 1217 1218
		}
	}
	rcu_read_unlock();
}

1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276

void __netif_schedule(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
		unsigned long flags;
		struct softnet_data *sd;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		dev->next_sched = sd->output_queue;
		sd->output_queue = dev;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__netif_schedule);

void __netif_rx_schedule(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	dev_hold(dev);
	list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
	if (dev->quota < 0)
		dev->quota += dev->weight;
	else
		dev->quota = dev->weight;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__netif_rx_schedule);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


/* Hot-plugging. */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
1277
		__netdev_watchdog_up(dev);
1278 1279 1280 1281 1282
	}
}
EXPORT_SYMBOL(netif_device_attach);


L
Linus Torvalds 已提交
1283 1284 1285 1286
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
1287
int skb_checksum_help(struct sk_buff *skb)
L
Linus Torvalds 已提交
1288
{
1289
	__wsum csum;
1290
	int ret = 0, offset;
L
Linus Torvalds 已提交
1291

1292
	if (skb->ip_summed == CHECKSUM_COMPLETE)
1293 1294 1295 1296 1297
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
L
Linus Torvalds 已提交
1298 1299 1300 1301 1302 1303 1304 1305
	}

	if (skb_cloned(skb)) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

1306
	offset = skb->csum_start - skb_headroom(skb);
1307
	BUG_ON(offset > (int)skb->len);
L
Linus Torvalds 已提交
1308 1309
	csum = skb_checksum(skb, offset, skb->len-offset, 0);

1310
	offset = skb_headlen(skb) - offset;
1311
	BUG_ON(offset <= 0);
A
Al Viro 已提交
1312
	BUG_ON(skb->csum_offset + 2 > offset);
L
Linus Torvalds 已提交
1313

1314 1315
	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) =
		csum_fold(csum);
1316
out_set_summed:
L
Linus Torvalds 已提交
1317
	skb->ip_summed = CHECKSUM_NONE;
1318
out:
L
Linus Torvalds 已提交
1319 1320 1321
	return ret;
}

1322 1323 1324
/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
1325
 *	@features: features for the output path (see dev->features)
1326 1327
 *
 *	This function segments the given skb and returns a list of segments.
1328 1329 1330
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
1331
 */
1332
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1333 1334 1335
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
A
Al Viro 已提交
1336
	__be16 type = skb->protocol;
1337
	int err;
1338 1339 1340

	BUG_ON(skb_shinfo(skb)->frag_list);

1341
	skb_reset_mac_header(skb);
1342
	skb->mac_len = skb->network_header - skb->mac_header;
1343 1344
	__skb_pull(skb, skb->mac_len);

1345
	if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
1346 1347 1348 1349 1350
		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

1351 1352 1353
	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
1354
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1355 1356 1357 1358
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
1359 1360
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
1361
			}
1362
			segs = ptype->gso_segment(skb, features);
1363 1364 1365 1366 1367
			break;
		}
	}
	rcu_read_unlock();

1368
	__skb_push(skb, skb->data - skb_mac_header(skb));
1369

1370 1371 1372 1373 1374
	return segs;
}

EXPORT_SYMBOL(skb_gso_segment);

1375 1376 1377 1378 1379
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
1380
		printk(KERN_ERR "%s: hw csum failure.\n",
1381
			dev ? dev->name : "<unknown>");
1382 1383 1384 1385 1386 1387
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

L
Linus Torvalds 已提交
1388 1389 1390 1391 1392 1393 1394
/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
1395
#ifdef CONFIG_HIGHMEM
L
Linus Torvalds 已提交
1396 1397 1398 1399 1400 1401 1402 1403 1404
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
			return 1;

1405
#endif
L
Linus Torvalds 已提交
1406 1407 1408
	return 0;
}

1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442
struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
1443 1444 1445 1446 1447 1448 1449 1450
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;
1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464

	if (unlikely(IS_ERR(segs)))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (likely(!skb->next)) {
S
Stephen Hemminger 已提交
1465
		if (!list_empty(&ptype_all))
1466 1467
			dev_queue_xmit_nit(skb, dev);

1468 1469 1470 1471 1472 1473
		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}
1474

1475
		return dev->hard_start_xmit(skb, dev);
1476 1477
	}

1478
gso:
1479 1480 1481 1482 1483 1484 1485 1486
	do {
		struct sk_buff *nskb = skb->next;
		int rc;

		skb->next = nskb->next;
		nskb->next = NULL;
		rc = dev->hard_start_xmit(nskb, dev);
		if (unlikely(rc)) {
1487
			nskb->next = skb->next;
1488 1489 1490
			skb->next = nskb;
			return rc;
		}
1491 1492 1493
		if (unlikely((netif_queue_stopped(dev) ||
			     netif_subqueue_stopped(dev, skb->queue_mapping)) &&
			     skb->next))
1494
			return NETDEV_TX_BUSY;
1495
	} while (skb->next);
1496

1497 1498 1499 1500 1501 1502 1503
	skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
	kfree_skb(skb);
	return 0;
}

L
Linus Torvalds 已提交
1504 1505
#define HARD_TX_LOCK(dev, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
H
Herbert Xu 已提交
1506
		netif_tx_lock(dev);			\
L
Linus Torvalds 已提交
1507 1508 1509 1510 1511
	}						\
}

#define HARD_TX_UNLOCK(dev) {				\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
H
Herbert Xu 已提交
1512
		netif_tx_unlock(dev);			\
L
Linus Torvalds 已提交
1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526
	}						\
}

/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
L
Linus Torvalds 已提交
1540 1541 1542 1543 1544 1545 1546 1547
 */

int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct Qdisc *q;
	int rc = -ENOMEM;

1548 1549 1550 1551
	/* GSO will handle the following emulations directly. */
	if (netif_needs_gso(dev, skb))
		goto gso;

L
Linus Torvalds 已提交
1552 1553
	if (skb_shinfo(skb)->frag_list &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
H
Herbert Xu 已提交
1554
	    __skb_linearize(skb))
L
Linus Torvalds 已提交
1555 1556 1557 1558 1559 1560 1561 1562
		goto out_kfree_skb;

	/* Fragmented skb is linearized if device does not support SG,
	 * or if at least one of fragments is in highmem and device
	 * does not support DMA from it.
	 */
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
H
Herbert Xu 已提交
1563
	    __skb_linearize(skb))
L
Linus Torvalds 已提交
1564 1565 1566 1567 1568
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
1569 1570 1571 1572
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));

1573 1574 1575 1576 1577
		if (!(dev->features & NETIF_F_GEN_CSUM) &&
		    !((dev->features & NETIF_F_IP_CSUM) &&
		      skb->protocol == htons(ETH_P_IP)) &&
		    !((dev->features & NETIF_F_IPV6_CSUM) &&
		      skb->protocol == htons(ETH_P_IPV6)))
1578 1579 1580
			if (skb_checksum_help(skb))
				goto out_kfree_skb;
	}
L
Linus Torvalds 已提交
1581

1582
gso:
1583 1584
	spin_lock_prefetch(&dev->queue_lock);

1585 1586
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
L
Linus Torvalds 已提交
1587
	 */
1588
	rcu_read_lock_bh();
L
Linus Torvalds 已提交
1589

1590 1591 1592
	/* Updates of qdisc are serialized by queue_lock.
	 * The struct Qdisc which is pointed to by qdisc is now a
	 * rcu structure - it may be accessed without acquiring
L
Linus Torvalds 已提交
1593
	 * a lock (but the structure may be stale.) The freeing of the
1594
	 * qdisc will be deferred until it's known that there are no
L
Linus Torvalds 已提交
1595
	 * more references to it.
1596 1597
	 *
	 * If the qdisc has an enqueue function, we still need to
L
Linus Torvalds 已提交
1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608
	 * hold the queue_lock before calling it, since queue_lock
	 * also serializes access to the device queue.
	 */

	q = rcu_dereference(dev->qdisc);
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
#endif
	if (q->enqueue) {
		/* Grab device queue */
		spin_lock(&dev->queue_lock);
1609 1610
		q = dev->qdisc;
		if (q->enqueue) {
1611 1612
			/* reset queue_mapping to zero */
			skb->queue_mapping = 0;
1613 1614 1615
			rc = q->enqueue(skb, q);
			qdisc_run(dev);
			spin_unlock(&dev->queue_lock);
L
Linus Torvalds 已提交
1616

1617 1618 1619
			rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
			goto out;
		}
L
Linus Torvalds 已提交
1620 1621 1622 1623 1624 1625
		spin_unlock(&dev->queue_lock);
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

H
Herbert Xu 已提交
1626 1627
	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
L
Linus Torvalds 已提交
1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shot the lock. It is not prone from deadlocks.
	   Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (dev->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, cpu);

1642 1643
			if (!netif_queue_stopped(dev) &&
			    !netif_subqueue_stopped(dev, skb->queue_mapping)) {
L
Linus Torvalds 已提交
1644
				rc = 0;
1645
				if (!dev_hard_start_xmit(skb, dev)) {
L
Linus Torvalds 已提交
1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663
					HARD_TX_UNLOCK(dev);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
1664
	rcu_read_unlock_bh();
L
Linus Torvalds 已提交
1665 1666 1667 1668 1669

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
1670
	rcu_read_unlock_bh();
L
Linus Torvalds 已提交
1671 1672 1673 1674 1675 1676 1677 1678
	return rc;
}


/*=======================================================================
			Receiver routines
  =======================================================================*/

1679 1680 1681
int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */
L
Linus Torvalds 已提交
1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };


/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_CN_LOW   (low congestion)
 *	NET_RX_CN_MOD   (moderate congestion)
 *	NET_RX_CN_HIGH  (high congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

1713
	if (!skb->tstamp.tv64)
1714
		net_timestamp(skb);
L
Linus Torvalds 已提交
1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729

	/*
	 * The code is rearranged so that the path is the most
	 * short when CPU is congested, but is still operating.
	 */
	local_irq_save(flags);
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			dev_hold(skb->dev);
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
1730
			return NET_RX_SUCCESS;
L
Linus Torvalds 已提交
1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758
		}

		netif_rx_schedule(&queue->backlog_dev);
		goto enqueue;
	}

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}

EXPORT_SYMBOL(netif_rx_ni);

D
David S. Miller 已提交
1759
static inline struct net_device *skb_bond(struct sk_buff *skb)
L
Linus Torvalds 已提交
1760 1761 1762
{
	struct net_device *dev = skb->dev;

1763
	if (dev->master) {
1764
		if (skb_bond_should_drop(skb)) {
1765 1766 1767
			kfree_skb(skb);
			return NULL;
		}
L
Linus Torvalds 已提交
1768
		skb->dev = dev->master;
1769
	}
D
David S. Miller 已提交
1770 1771

	return dev;
L
Linus Torvalds 已提交
1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819
}

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			BUG_TRAP(!atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct net_device *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct net_device *dev = head;
			head = head->next_sched;

			smp_mb__before_clear_bit();
			clear_bit(__LINK_STATE_SCHED, &dev->state);

			if (spin_trylock(&dev->queue_lock)) {
				qdisc_run(dev);
				spin_unlock(&dev->queue_lock);
			} else {
				netif_schedule(dev);
			}
		}
	}
}

1820 1821 1822
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
L
Linus Torvalds 已提交
1823 1824
{
	atomic_inc(&skb->users);
D
David S. Miller 已提交
1825
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
L
Linus Torvalds 已提交
1826 1827 1828
}

#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
1829
/* These hooks defined here for ATM */
L
Linus Torvalds 已提交
1830 1831 1832
struct net_bridge;
struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
						unsigned char *addr);
1833
void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
L
Linus Torvalds 已提交
1834

1835 1836 1837 1838 1839 1840 1841 1842 1843
/*
 * If bridge module is loaded call bridging hook.
 *  returns NULL if packet was consumed.
 */
struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
					struct sk_buff *skb) __read_mostly;
static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
					    struct packet_type **pt_prev, int *ret,
					    struct net_device *orig_dev)
L
Linus Torvalds 已提交
1844 1845 1846
{
	struct net_bridge_port *port;

1847 1848 1849
	if (skb->pkt_type == PACKET_LOOPBACK ||
	    (port = rcu_dereference(skb->dev->br_port)) == NULL)
		return skb;
L
Linus Torvalds 已提交
1850 1851

	if (*pt_prev) {
1852
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
L
Linus Torvalds 已提交
1853
		*pt_prev = NULL;
1854 1855
	}

1856
	return br_handle_frame_hook(port, skb);
L
Linus Torvalds 已提交
1857 1858
}
#else
1859
#define handle_bridge(skb, pt_prev, ret, orig_dev)	(skb)
L
Linus Torvalds 已提交
1860 1861
#endif

P
Patrick McHardy 已提交
1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883
#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);

static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
					     struct packet_type **pt_prev,
					     int *ret,
					     struct net_device *orig_dev)
{
	if (skb->dev->macvlan_port == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}
	return macvlan_handle_frame_hook(skb);
}
#else
#define handle_macvlan(skb, pt_prev, ret, orig_dev)	(skb)
#endif

L
Linus Torvalds 已提交
1884 1885 1886 1887 1888
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
 * a compare and 2 stores extra right now if we dont have it on
 * but have CONFIG_NET_CLS_ACT
1889
 * NOTE: This doesnt stop any functionality; if you dont have
L
Linus Torvalds 已提交
1890 1891 1892
 * the ingress scheduler, you just cant add policies on ingress.
 *
 */
1893
static int ing_filter(struct sk_buff *skb)
L
Linus Torvalds 已提交
1894 1895 1896 1897
{
	struct Qdisc *q;
	struct net_device *dev = skb->dev;
	int result = TC_ACT_OK;
1898

L
Linus Torvalds 已提交
1899 1900 1901
	if (dev->qdisc_ingress) {
		__u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
		if (MAX_RED_LOOP < ttl++) {
1902 1903
			printk(KERN_WARNING "Redir loop detected Dropping packet (%d->%d)\n",
				skb->iif, skb->dev->ifindex);
L
Linus Torvalds 已提交
1904 1905 1906 1907 1908 1909
			return TC_ACT_SHOT;
		}

		skb->tc_verd = SET_TC_RTTL(skb->tc_verd,ttl);

		skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS);
1910

1911
		spin_lock(&dev->ingress_lock);
L
Linus Torvalds 已提交
1912 1913
		if ((q = dev->qdisc_ingress) != NULL)
			result = q->enqueue(skb, q);
1914
		spin_unlock(&dev->ingress_lock);
L
Linus Torvalds 已提交
1915 1916 1917 1918 1919 1920 1921 1922 1923 1924

	}

	return result;
}
#endif

int netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
D
David S. Miller 已提交
1925
	struct net_device *orig_dev;
L
Linus Torvalds 已提交
1926
	int ret = NET_RX_DROP;
A
Al Viro 已提交
1927
	__be16 type;
L
Linus Torvalds 已提交
1928 1929 1930 1931 1932

	/* if we've gotten here through NAPI, check netpoll */
	if (skb->dev->poll && netpoll_rx(skb))
		return NET_RX_DROP;

1933
	if (!skb->tstamp.tv64)
1934
		net_timestamp(skb);
L
Linus Torvalds 已提交
1935

1936 1937
	if (!skb->iif)
		skb->iif = skb->dev->ifindex;
1938

D
David S. Miller 已提交
1939
	orig_dev = skb_bond(skb);
L
Linus Torvalds 已提交
1940

1941 1942 1943
	if (!orig_dev)
		return NET_RX_DROP;

L
Linus Torvalds 已提交
1944 1945
	__get_cpu_var(netdev_rx_stat).total++;

1946
	skb_reset_network_header(skb);
1947
	skb_reset_transport_header(skb);
1948
	skb->mac_len = skb->network_header - skb->mac_header;
L
Linus Torvalds 已提交
1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962

	pt_prev = NULL;

	rcu_read_lock();

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
1963
			if (pt_prev)
D
David S. Miller 已提交
1964
				ret = deliver_skb(skb, pt_prev, orig_dev);
L
Linus Torvalds 已提交
1965 1966 1967 1968 1969 1970
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
	if (pt_prev) {
D
David S. Miller 已提交
1971
		ret = deliver_skb(skb, pt_prev, orig_dev);
L
Linus Torvalds 已提交
1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987
		pt_prev = NULL; /* noone else should process this after*/
	} else {
		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
	}

	ret = ing_filter(skb);

	if (ret == TC_ACT_SHOT || (ret == TC_ACT_STOLEN)) {
		kfree_skb(skb);
		goto out;
	}

	skb->tc_verd = 0;
ncls:
#endif

1988
	skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
P
Patrick McHardy 已提交
1989 1990 1991
	if (!skb)
		goto out;
	skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
1992
	if (!skb)
L
Linus Torvalds 已提交
1993 1994 1995 1996 1997 1998
		goto out;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) {
		if (ptype->type == type &&
		    (!ptype->dev || ptype->dev == skb->dev)) {
1999
			if (pt_prev)
D
David S. Miller 已提交
2000
				ret = deliver_skb(skb, pt_prev, orig_dev);
L
Linus Torvalds 已提交
2001 2002 2003 2004 2005
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
D
David S. Miller 已提交
2006
		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
L
Linus Torvalds 已提交
2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026
	} else {
		kfree_skb(skb);
		/* Jamal, now you will not able to escape explaining
		 * me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
}

static int process_backlog(struct net_device *backlog_dev, int *budget)
{
	int work = 0;
	int quota = min(backlog_dev->quota, *budget);
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;

2027
	backlog_dev->weight = weight_p;
L
Linus Torvalds 已提交
2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070
	for (;;) {
		struct sk_buff *skb;
		struct net_device *dev;

		local_irq_disable();
		skb = __skb_dequeue(&queue->input_pkt_queue);
		if (!skb)
			goto job_done;
		local_irq_enable();

		dev = skb->dev;

		netif_receive_skb(skb);

		dev_put(dev);

		work++;

		if (work >= quota || jiffies - start_time > 1)
			break;

	}

	backlog_dev->quota -= work;
	*budget -= work;
	return -1;

job_done:
	backlog_dev->quota -= work;
	*budget -= work;

	list_del(&backlog_dev->poll_list);
	smp_mb__before_clear_bit();
	netif_poll_enable(backlog_dev);

	local_irq_enable();
	return 0;
}

static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;
2071
	int budget = netdev_budget;
2072 2073
	void *have;

L
Linus Torvalds 已提交
2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085
	local_irq_disable();

	while (!list_empty(&queue->poll_list)) {
		struct net_device *dev;

		if (budget <= 0 || jiffies - start_time > 1)
			goto softnet_break;

		local_irq_enable();

		dev = list_entry(queue->poll_list.next,
				 struct net_device, poll_list);
2086
		have = netpoll_poll_lock(dev);
L
Linus Torvalds 已提交
2087 2088

		if (dev->quota <= 0 || dev->poll(dev, &budget)) {
2089
			netpoll_poll_unlock(have);
L
Linus Torvalds 已提交
2090
			local_irq_disable();
2091
			list_move_tail(&dev->poll_list, &queue->poll_list);
L
Linus Torvalds 已提交
2092 2093 2094 2095 2096
			if (dev->quota < 0)
				dev->quota += dev->weight;
			else
				dev->quota = dev->weight;
		} else {
2097
			netpoll_poll_unlock(have);
L
Linus Torvalds 已提交
2098 2099 2100 2101 2102
			dev_put(dev);
			local_irq_disable();
		}
	}
out:
2103
	local_irq_enable();
2104 2105 2106 2107 2108
#ifdef CONFIG_NET_DMA
	/*
	 * There may not be any more sk_buffs coming right now, so push
	 * any pending DMA copies to hardware
	 */
2109 2110 2111 2112 2113 2114 2115
	if (!cpus_empty(net_dma.channel_mask)) {
		int chan_idx;
		for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
			struct dma_chan *chan = net_dma.channels[chan_idx];
			if (chan)
				dma_async_memcpy_issue_pending(chan);
		}
2116 2117
	}
#endif
L
Linus Torvalds 已提交
2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213
	return;

softnet_break:
	__get_cpu_var(netdev_rx_stat).time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}

static gifconf_func_t * gifconf_list [NPROTO];

/**
 *	register_gifconf	-	register a SIOCGIF handler
 *	@family: Address family
 *	@gifconf: Function handler
 *
 *	Register protocol dependent address dumping routines. The handler
 *	that is passed must not be freed or reused until it has been replaced
 *	by another handler.
 */
int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
{
	if (family >= NPROTO)
		return -EINVAL;
	gifconf_list[family] = gifconf;
	return 0;
}


/*
 *	Map an interface index to its name (SIOCGIFNAME)
 */

/*
 *	We need this ioctl for efficient implementation of the
 *	if_indextoname() function required by the IPv6 API.  Without
 *	it, we would have to search all the interfaces to find a
 *	match.  --pb
 */

static int dev_ifname(struct ifreq __user *arg)
{
	struct net_device *dev;
	struct ifreq ifr;

	/*
	 *	Fetch the caller's info block.
	 */

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(ifr.ifr_ifindex);
	if (!dev) {
		read_unlock(&dev_base_lock);
		return -ENODEV;
	}

	strcpy(ifr.ifr_name, dev->name);
	read_unlock(&dev_base_lock);

	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
		return -EFAULT;
	return 0;
}

/*
 *	Perform a SIOCGIFCONF call. This structure will change
 *	size eventually, and there is nothing I can do about it.
 *	Thus we will need a 'compatibility mode'.
 */

static int dev_ifconf(char __user *arg)
{
	struct ifconf ifc;
	struct net_device *dev;
	char __user *pos;
	int len;
	int total;
	int i;

	/*
	 *	Fetch the caller's info block.
	 */

	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
		return -EFAULT;

	pos = ifc.ifc_buf;
	len = ifc.ifc_len;

	/*
	 *	Loop over the interfaces, and write an info block for each.
	 */

	total = 0;
2214
	for_each_netdev(dev) {
L
Linus Torvalds 已提交
2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227
		for (i = 0; i < NPROTO; i++) {
			if (gifconf_list[i]) {
				int done;
				if (!pos)
					done = gifconf_list[i](dev, NULL, 0);
				else
					done = gifconf_list[i](dev, pos + total,
							       len - total);
				if (done < 0)
					return -EFAULT;
				total += done;
			}
		}
2228
	}
L
Linus Torvalds 已提交
2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245

	/*
	 *	All done.  Write the updated control block back to the caller.
	 */
	ifc.ifc_len = total;

	/*
	 * 	Both BSD and Solaris return 0 here, so we do too.
	 */
	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
}

#ifdef CONFIG_PROC_FS
/*
 *	This is invoked by the /proc filesystem handler to display a device
 *	in detail.
 */
2246
void *dev_seq_start(struct seq_file *seq, loff_t *pos)
L
Linus Torvalds 已提交
2247
{
2248
	loff_t off;
L
Linus Torvalds 已提交
2249 2250
	struct net_device *dev;

2251 2252 2253
	read_lock(&dev_base_lock);
	if (!*pos)
		return SEQ_START_TOKEN;
L
Linus Torvalds 已提交
2254

2255 2256 2257 2258
	off = 1;
	for_each_netdev(dev)
		if (off++ == *pos)
			return dev;
L
Linus Torvalds 已提交
2259

2260
	return NULL;
L
Linus Torvalds 已提交
2261 2262 2263 2264 2265
}

void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
2266 2267
	return v == SEQ_START_TOKEN ?
		first_net_device() : next_net_device((struct net_device *)v);
L
Linus Torvalds 已提交
2268 2269 2270 2271 2272 2273 2274 2275 2276
}

void dev_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&dev_base_lock);
}

static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
R
Rusty Russell 已提交
2277
	struct net_device_stats *stats = dev->get_stats(dev);
L
Linus Torvalds 已提交
2278

2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295
	seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
		   "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
		   dev->name, stats->rx_bytes, stats->rx_packets,
		   stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors +
		    stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->rx_compressed, stats->multicast,
		   stats->tx_bytes, stats->tx_packets,
		   stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors +
		    stats->tx_aborted_errors +
		    stats->tx_window_errors +
		    stats->tx_heartbeat_errors,
		   stats->tx_compressed);
L
Linus Torvalds 已提交
2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319
}

/*
 *	Called from the PROCfs module. This now uses the new arbitrary sized
 *	/proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}

static struct netif_rx_stats *softnet_get_online(loff_t *pos)
{
	struct netif_rx_stats *rc = NULL;

	while (*pos < NR_CPUS)
2320
		if (cpu_online(*pos)) {
L
Linus Torvalds 已提交
2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347
			rc = &per_cpu(netdev_rx_stat, *pos);
			break;
		} else
			++*pos;
	return rc;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct netif_rx_stats *s = v;

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
2348
		   s->total, s->dropped, s->time_squeeze, 0,
2349 2350
		   0, 0, 0, 0, /* was fastroute */
		   s->cpu_collision );
L
Linus Torvalds 已提交
2351 2352 2353
	return 0;
}

2354
static const struct seq_operations dev_seq_ops = {
L
Linus Torvalds 已提交
2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
};

static int dev_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &dev_seq_ops);
}

2366
static const struct file_operations dev_seq_fops = {
L
Linus Torvalds 已提交
2367 2368 2369 2370 2371 2372 2373
	.owner	 = THIS_MODULE,
	.open    = dev_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

2374
static const struct seq_operations softnet_seq_ops = {
L
Linus Torvalds 已提交
2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
};

static int softnet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &softnet_seq_ops);
}

2386
static const struct file_operations softnet_seq_fops = {
L
Linus Torvalds 已提交
2387 2388 2389 2390 2391 2392 2393
	.owner	 = THIS_MODULE,
	.open    = softnet_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522
static void *ptype_get_idx(loff_t pos)
{
	struct packet_type *pt = NULL;
	loff_t i = 0;
	int t;

	list_for_each_entry_rcu(pt, &ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

	for (t = 0; t < 16; t++) {
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(0);

	pt = v;
	nxt = pt->list.next;
	if (pt->type == htons(ETH_P_ALL)) {
		if (nxt != &ptype_all)
			goto found;
		hash = 0;
		nxt = ptype_base[0].next;
	} else
		hash = ntohs(pt->type) & 15;

	while (nxt == &ptype_base[hash]) {
		if (++hash >= 16)
			return NULL;
		nxt = ptype_base[hash].next;
	}
found:
	return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
{
	rcu_read_unlock();
}

static void ptype_seq_decode(struct seq_file *seq, void *sym)
{
#ifdef CONFIG_KALLSYMS
	unsigned long offset = 0, symsize;
	const char *symname;
	char *modname;
	char namebuf[128];

	symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
				  &modname, namebuf);

	if (symname) {
		char *delim = ":";

		if (!modname)
			modname = delim = "";
		seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
			   symname, offset);
		return;
	}
#endif

	seq_printf(seq, "[%p]", sym);
}

static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct packet_type *pt = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Type Device      Function\n");
	else {
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

		seq_printf(seq, " %-8s ",
			   pt->dev ? pt->dev->name : "");
		ptype_seq_decode(seq,  pt->func);
		seq_putc(seq, '\n');
	}

	return 0;
}

static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next  = ptype_seq_next,
	.stop  = ptype_seq_stop,
	.show  = ptype_seq_show,
};

static int ptype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &ptype_seq_ops);
}

static const struct file_operations ptype_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = ptype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};


L
Linus Torvalds 已提交
2523 2524 2525 2526 2527 2528 2529 2530
static int __init dev_proc_init(void)
{
	int rc = -ENOMEM;

	if (!proc_net_fops_create("dev", S_IRUGO, &dev_seq_fops))
		goto out;
	if (!proc_net_fops_create("softnet_stat", S_IRUGO, &softnet_seq_fops))
		goto out_dev;
2531 2532 2533
	if (!proc_net_fops_create("ptype", S_IRUGO, &ptype_seq_fops))
		goto out_dev2;

2534
	if (wext_proc_init())
L
Linus Torvalds 已提交
2535 2536 2537 2538 2539
		goto out_softnet;
	rc = 0;
out:
	return rc;
out_softnet:
2540
	proc_net_remove("ptype");
2541 2542
out_dev2:
	proc_net_remove("softnet_stat");
L
Linus Torvalds 已提交
2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575
out_dev:
	proc_net_remove("dev");
	goto out;
}
#else
#define dev_proc_init() 0
#endif	/* CONFIG_PROC_FS */


/**
 *	netdev_set_master	-	set up master/slave pair
 *	@slave: slave device
 *	@master: new master device
 *
 *	Changes the master device of the slave. Pass %NULL to break the
 *	bonding. The caller must hold the RTNL semaphore. On a failure
 *	a negative errno code is returned. On success the reference counts
 *	are adjusted, %RTM_NEWLINK is sent to the routing socket and the
 *	function returns zero.
 */
int netdev_set_master(struct net_device *slave, struct net_device *master)
{
	struct net_device *old = slave->master;

	ASSERT_RTNL();

	if (master) {
		if (old)
			return -EBUSY;
		dev_hold(master);
	}

	slave->master = master;
2576

L
Linus Torvalds 已提交
2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590
	synchronize_net();

	if (old)
		dev_put(old);

	if (master)
		slave->flags |= IFF_SLAVE;
	else
		slave->flags &= ~IFF_SLAVE;

	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
	return 0;
}

2591
static void __dev_set_promiscuity(struct net_device *dev, int inc)
L
Linus Torvalds 已提交
2592 2593 2594
{
	unsigned short old_flags = dev->flags;

2595 2596
	ASSERT_RTNL();

L
Linus Torvalds 已提交
2597 2598
	if ((dev->promiscuity += inc) == 0)
		dev->flags &= ~IFF_PROMISC;
2599 2600 2601
	else
		dev->flags |= IFF_PROMISC;
	if (dev->flags != old_flags) {
L
Linus Torvalds 已提交
2602 2603
		printk(KERN_INFO "device %s %s promiscuous mode\n",
		       dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2604
							       "left");
S
Steve Grubb 已提交
2605 2606 2607 2608 2609
		audit_log(current->audit_context, GFP_ATOMIC,
			AUDIT_ANOM_PROMISCUOUS,
			"dev=%s prom=%d old_prom=%d auid=%u",
			dev->name, (dev->flags & IFF_PROMISC),
			(old_flags & IFF_PROMISC),
2610
			audit_get_loginuid(current->audit_context));
2611 2612 2613

		if (dev->change_rx_flags)
			dev->change_rx_flags(dev, IFF_PROMISC);
L
Linus Torvalds 已提交
2614 2615 2616
	}
}

2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635
/**
 *	dev_set_promiscuity	- update promiscuity count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove promiscuity from a device. While the count in the device
 *	remains above zero the interface remains promiscuous. Once it hits zero
 *	the device reverts back to normal filtering operation. A negative inc
 *	value is used to drop promiscuity on the device.
 */
void dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

	__dev_set_promiscuity(dev, inc);
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
}

L
Linus Torvalds 已提交
2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651
/**
 *	dev_set_allmulti	- update allmulti count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove reception of all multicast frames to a device. While the
 *	count in the device remains above zero the interface remains listening
 *	to all interfaces. Once it hits zero the device reverts back to normal
 *	filtering operation. A negative @inc value is used to drop the counter
 *	when releasing a resource needing all multicasts.
 */

void dev_set_allmulti(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

2652 2653
	ASSERT_RTNL();

L
Linus Torvalds 已提交
2654 2655 2656
	dev->flags |= IFF_ALLMULTI;
	if ((dev->allmulti += inc) == 0)
		dev->flags &= ~IFF_ALLMULTI;
2657 2658 2659
	if (dev->flags ^ old_flags) {
		if (dev->change_rx_flags)
			dev->change_rx_flags(dev, IFF_ALLMULTI);
2660
		dev_set_rx_mode(dev);
2661
	}
2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676
}

/*
 *	Upload unicast and multicast address lists to device and
 *	configure RX filtering. When the device doesn't support unicast
 *	filtering it is put in promiscous mode while unicast addresses
 *	are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags&IFF_UP))
		return;

	if (!netif_device_present(dev))
2677
		return;
2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702

	if (dev->set_rx_mode)
		dev->set_rx_mode(dev);
	else {
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (dev->uc_count > 0 && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1);
			dev->uc_promisc = 1;
		} else if (dev->uc_count == 0 && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1);
			dev->uc_promisc = 0;
		}

		if (dev->set_multicast_list)
			dev->set_multicast_list(dev);
	}
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_tx_unlock_bh(dev);
L
Linus Torvalds 已提交
2703 2704
}

2705 2706
int __dev_addr_delete(struct dev_addr_list **list, int *count,
		      void *addr, int alen, int glbl)
2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723
{
	struct dev_addr_list *da;

	for (; (da = *list) != NULL; list = &da->next) {
		if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
		    alen == da->da_addrlen) {
			if (glbl) {
				int old_glbl = da->da_gusers;
				da->da_gusers = 0;
				if (old_glbl == 0)
					break;
			}
			if (--da->da_users)
				return 0;

			*list = da->next;
			kfree(da);
2724
			(*count)--;
2725 2726 2727 2728 2729 2730
			return 0;
		}
	}
	return -ENOENT;
}

2731 2732
int __dev_addr_add(struct dev_addr_list **list, int *count,
		   void *addr, int alen, int glbl)
2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758
{
	struct dev_addr_list *da;

	for (da = *list; da != NULL; da = da->next) {
		if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
		    da->da_addrlen == alen) {
			if (glbl) {
				int old_glbl = da->da_gusers;
				da->da_gusers = 1;
				if (old_glbl)
					return 0;
			}
			da->da_users++;
			return 0;
		}
	}

	da = kmalloc(sizeof(*da), GFP_ATOMIC);
	if (da == NULL)
		return -ENOMEM;
	memcpy(da->da_addr, addr, alen);
	da->da_addrlen = alen;
	da->da_users = 1;
	da->da_gusers = glbl ? 1 : 0;
	da->next = *list;
	*list = da;
2759
	(*count)++;
2760 2761 2762
	return 0;
}

2763 2764 2765
/**
 *	dev_unicast_delete	- Release secondary unicast address.
 *	@dev: device
R
Randy Dunlap 已提交
2766 2767
 *	@addr: address to delete
 *	@alen: length of @addr
2768 2769
 *
 *	Release reference to a secondary unicast address and remove it
R
Randy Dunlap 已提交
2770
 *	from the device if the reference count drops to zero.
2771 2772 2773 2774 2775 2776 2777 2778 2779 2780
 *
 * 	The caller must hold the rtnl_mutex.
 */
int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
{
	int err;

	ASSERT_RTNL();

	netif_tx_lock_bh(dev);
2781 2782
	err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
	if (!err)
2783 2784 2785 2786 2787 2788 2789 2790 2791
		__dev_set_rx_mode(dev);
	netif_tx_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_unicast_delete);

/**
 *	dev_unicast_add		- add a secondary unicast address
 *	@dev: device
R
Randy Dunlap 已提交
2792 2793
 *	@addr: address to delete
 *	@alen: length of @addr
2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806
 *
 *	Add a secondary unicast address to the device or increase
 *	the reference count if it already exists.
 *
 *	The caller must hold the rtnl_mutex.
 */
int dev_unicast_add(struct net_device *dev, void *addr, int alen)
{
	int err;

	ASSERT_RTNL();

	netif_tx_lock_bh(dev);
2807 2808
	err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
	if (!err)
2809 2810 2811 2812 2813 2814
		__dev_set_rx_mode(dev);
	netif_tx_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_unicast_add);

2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828
static void __dev_addr_discard(struct dev_addr_list **list)
{
	struct dev_addr_list *tmp;

	while (*list != NULL) {
		tmp = *list;
		*list = tmp->next;
		if (tmp->da_users > tmp->da_gusers)
			printk("__dev_addr_discard: address leakage! "
			       "da_users=%d\n", tmp->da_users);
		kfree(tmp);
	}
}

2829
static void dev_addr_discard(struct net_device *dev)
2830 2831
{
	netif_tx_lock_bh(dev);
2832

2833 2834 2835
	__dev_addr_discard(&dev->uc_list);
	dev->uc_count = 0;

2836 2837
	__dev_addr_discard(&dev->mc_list);
	dev->mc_count = 0;
2838

2839 2840 2841
	netif_tx_unlock_bh(dev);
}

L
Linus Torvalds 已提交
2842 2843 2844 2845 2846 2847
unsigned dev_get_flags(const struct net_device *dev)
{
	unsigned flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
S
Stefan Rompf 已提交
2848 2849 2850
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
L
Linus Torvalds 已提交
2851 2852 2853
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

S
Stefan Rompf 已提交
2854 2855 2856 2857 2858 2859 2860 2861
	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}
L
Linus Torvalds 已提交
2862 2863 2864 2865 2866 2867

	return flags;
}

int dev_change_flags(struct net_device *dev, unsigned flags)
{
2868
	int ret, changes;
L
Linus Torvalds 已提交
2869 2870
	int old_flags = dev->flags;

2871 2872
	ASSERT_RTNL();

L
Linus Torvalds 已提交
2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886
	/*
	 *	Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 *	Load in the correct multicast list now the flags have changed.
	 */

2887 2888 2889
	if (dev->change_rx_flags && (dev->flags ^ flags) & IFF_MULTICAST)
		dev->change_rx_flags(dev, IFF_MULTICAST);

2890
	dev_set_rx_mode(dev);
L
Linus Torvalds 已提交
2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902

	/*
	 *	Have we downed the interface. We handle IFF_UP ourselves
	 *	according to user attempts to set it, rather than blindly
	 *	setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different  ? */
		ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);

		if (!ret)
2903
			dev_set_rx_mode(dev);
L
Linus Torvalds 已提交
2904 2905 2906 2907 2908
	}

	if (dev->flags & IFF_UP &&
	    ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
					  IFF_VOLATILE)))
2909
		raw_notifier_call_chain(&netdev_chain,
2910
				NETDEV_CHANGE, dev);
L
Linus Torvalds 已提交
2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? +1 : -1;
		dev->gflags ^= IFF_PROMISC;
		dev_set_promiscuity(dev, inc);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	   is important. Some (broken) drivers set IFF_PROMISC, when
	   IFF_ALLMULTI is requested not asking us and not reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
		dev->gflags ^= IFF_ALLMULTI;
		dev_set_allmulti(dev, inc);
	}

2928 2929 2930 2931
	/* Exclude state transition flags, already notified */
	changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
	if (changes)
		rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
L
Linus Torvalds 已提交
2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955

	return ret;
}

int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	int err;

	if (new_mtu == dev->mtu)
		return 0;

	/*	MTU must be positive.	 */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = 0;
	if (dev->change_mtu)
		err = dev->change_mtu(dev, new_mtu);
	else
		dev->mtu = new_mtu;
	if (!err && dev->flags & IFF_UP)
2956
		raw_notifier_call_chain(&netdev_chain,
2957
				NETDEV_CHANGEMTU, dev);
L
Linus Torvalds 已提交
2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972
	return err;
}

int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	int err;

	if (!dev->set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = dev->set_mac_address(dev, sa);
	if (!err)
2973
		raw_notifier_call_chain(&netdev_chain,
2974
				NETDEV_CHANGEADDR, dev);
L
Linus Torvalds 已提交
2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029
	return err;
}

/*
 *	Perform the SIOCxIFxxx calls.
 */
static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = __dev_get_by_name(ifr->ifr_name);

	if (!dev)
		return -ENODEV;

	switch (cmd) {
		case SIOCGIFFLAGS:	/* Get interface flags */
			ifr->ifr_flags = dev_get_flags(dev);
			return 0;

		case SIOCSIFFLAGS:	/* Set interface flags */
			return dev_change_flags(dev, ifr->ifr_flags);

		case SIOCGIFMETRIC:	/* Get the metric on the interface
					   (currently unused) */
			ifr->ifr_metric = 0;
			return 0;

		case SIOCSIFMETRIC:	/* Set the metric on the interface
					   (currently unused) */
			return -EOPNOTSUPP;

		case SIOCGIFMTU:	/* Get the MTU of a device */
			ifr->ifr_mtu = dev->mtu;
			return 0;

		case SIOCSIFMTU:	/* Set the MTU of a device */
			return dev_set_mtu(dev, ifr->ifr_mtu);

		case SIOCGIFHWADDR:
			if (!dev->addr_len)
				memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
			else
				memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
				       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
			ifr->ifr_hwaddr.sa_family = dev->type;
			return 0;

		case SIOCSIFHWADDR:
			return dev_set_mac_address(dev, &ifr->ifr_hwaddr);

		case SIOCSIFHWBROADCAST:
			if (ifr->ifr_hwaddr.sa_family != dev->type)
				return -EINVAL;
			memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3030
			raw_notifier_call_chain(&netdev_chain,
L
Linus Torvalds 已提交
3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148
					    NETDEV_CHANGEADDR, dev);
			return 0;

		case SIOCGIFMAP:
			ifr->ifr_map.mem_start = dev->mem_start;
			ifr->ifr_map.mem_end   = dev->mem_end;
			ifr->ifr_map.base_addr = dev->base_addr;
			ifr->ifr_map.irq       = dev->irq;
			ifr->ifr_map.dma       = dev->dma;
			ifr->ifr_map.port      = dev->if_port;
			return 0;

		case SIOCSIFMAP:
			if (dev->set_config) {
				if (!netif_device_present(dev))
					return -ENODEV;
				return dev->set_config(dev, &ifr->ifr_map);
			}
			return -EOPNOTSUPP;

		case SIOCADDMULTI:
			if (!dev->set_multicast_list ||
			    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
				return -EINVAL;
			if (!netif_device_present(dev))
				return -ENODEV;
			return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
					  dev->addr_len, 1);

		case SIOCDELMULTI:
			if (!dev->set_multicast_list ||
			    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
				return -EINVAL;
			if (!netif_device_present(dev))
				return -ENODEV;
			return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
					     dev->addr_len, 1);

		case SIOCGIFINDEX:
			ifr->ifr_ifindex = dev->ifindex;
			return 0;

		case SIOCGIFTXQLEN:
			ifr->ifr_qlen = dev->tx_queue_len;
			return 0;

		case SIOCSIFTXQLEN:
			if (ifr->ifr_qlen < 0)
				return -EINVAL;
			dev->tx_queue_len = ifr->ifr_qlen;
			return 0;

		case SIOCSIFNAME:
			ifr->ifr_newname[IFNAMSIZ-1] = '\0';
			return dev_change_name(dev, ifr->ifr_newname);

		/*
		 *	Unknown or private ioctl
		 */

		default:
			if ((cmd >= SIOCDEVPRIVATE &&
			    cmd <= SIOCDEVPRIVATE + 15) ||
			    cmd == SIOCBONDENSLAVE ||
			    cmd == SIOCBONDRELEASE ||
			    cmd == SIOCBONDSETHWADDR ||
			    cmd == SIOCBONDSLAVEINFOQUERY ||
			    cmd == SIOCBONDINFOQUERY ||
			    cmd == SIOCBONDCHANGEACTIVE ||
			    cmd == SIOCGMIIPHY ||
			    cmd == SIOCGMIIREG ||
			    cmd == SIOCSMIIREG ||
			    cmd == SIOCBRADDIF ||
			    cmd == SIOCBRDELIF ||
			    cmd == SIOCWANDEV) {
				err = -EOPNOTSUPP;
				if (dev->do_ioctl) {
					if (netif_device_present(dev))
						err = dev->do_ioctl(dev, ifr,
								    cmd);
					else
						err = -ENODEV;
				}
			} else
				err = -EINVAL;

	}
	return err;
}

/*
 *	This function handles all "interface"-type I/O control requests. The actual
 *	'doing' part of this is dev_ifsioc above.
 */

/**
 *	dev_ioctl	-	network device ioctl
 *	@cmd: command to issue
 *	@arg: pointer to a struct ifreq in user space
 *
 *	Issue ioctl functions to devices. This is normally called by the
 *	user space syscall interfaces but can sometimes be useful for
 *	other purposes. The return value is the return from the syscall if
 *	positive or a negative errno code on error.
 */

int dev_ioctl(unsigned int cmd, void __user *arg)
{
	struct ifreq ifr;
	int ret;
	char *colon;

	/* One special case: SIOCGIFCONF takes ifconf argument
	   and requires shared lock, because it sleeps writing
	   to user space.
	 */

	if (cmd == SIOCGIFCONF) {
3149
		rtnl_lock();
L
Linus Torvalds 已提交
3150
		ret = dev_ifconf((char __user *) arg);
3151
		rtnl_unlock();
L
Linus Torvalds 已提交
3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260
		return ret;
	}
	if (cmd == SIOCGIFNAME)
		return dev_ifname((struct ifreq __user *)arg);

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	ifr.ifr_name[IFNAMSIZ-1] = 0;

	colon = strchr(ifr.ifr_name, ':');
	if (colon)
		*colon = 0;

	/*
	 *	See which interface the caller is talking about.
	 */

	switch (cmd) {
		/*
		 *	These ioctl calls:
		 *	- can be done by all.
		 *	- atomic and do not require locking.
		 *	- return a value
		 */
		case SIOCGIFFLAGS:
		case SIOCGIFMETRIC:
		case SIOCGIFMTU:
		case SIOCGIFHWADDR:
		case SIOCGIFSLAVE:
		case SIOCGIFMAP:
		case SIOCGIFINDEX:
		case SIOCGIFTXQLEN:
			dev_load(ifr.ifr_name);
			read_lock(&dev_base_lock);
			ret = dev_ifsioc(&ifr, cmd);
			read_unlock(&dev_base_lock);
			if (!ret) {
				if (colon)
					*colon = ':';
				if (copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
					ret = -EFAULT;
			}
			return ret;

		case SIOCETHTOOL:
			dev_load(ifr.ifr_name);
			rtnl_lock();
			ret = dev_ethtool(&ifr);
			rtnl_unlock();
			if (!ret) {
				if (colon)
					*colon = ':';
				if (copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
					ret = -EFAULT;
			}
			return ret;

		/*
		 *	These ioctl calls:
		 *	- require superuser power.
		 *	- require strict serialization.
		 *	- return a value
		 */
		case SIOCGMIIPHY:
		case SIOCGMIIREG:
		case SIOCSIFNAME:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			dev_load(ifr.ifr_name);
			rtnl_lock();
			ret = dev_ifsioc(&ifr, cmd);
			rtnl_unlock();
			if (!ret) {
				if (colon)
					*colon = ':';
				if (copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
					ret = -EFAULT;
			}
			return ret;

		/*
		 *	These ioctl calls:
		 *	- require superuser power.
		 *	- require strict serialization.
		 *	- do not return a value
		 */
		case SIOCSIFFLAGS:
		case SIOCSIFMETRIC:
		case SIOCSIFMTU:
		case SIOCSIFMAP:
		case SIOCSIFHWADDR:
		case SIOCSIFSLAVE:
		case SIOCADDMULTI:
		case SIOCDELMULTI:
		case SIOCSIFHWBROADCAST:
		case SIOCSIFTXQLEN:
		case SIOCSMIIREG:
		case SIOCBONDENSLAVE:
		case SIOCBONDRELEASE:
		case SIOCBONDSETHWADDR:
		case SIOCBONDCHANGEACTIVE:
		case SIOCBRADDIF:
		case SIOCBRDELIF:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
3261 3262 3263
			/* fall through */
		case SIOCBONDSLAVEINFOQUERY:
		case SIOCBONDINFOQUERY:
L
Linus Torvalds 已提交
3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295
			dev_load(ifr.ifr_name);
			rtnl_lock();
			ret = dev_ifsioc(&ifr, cmd);
			rtnl_unlock();
			return ret;

		case SIOCGIFMEM:
			/* Get the per device memory space. We can add this but
			 * currently do not support it */
		case SIOCSIFMEM:
			/* Set the per device memory buffer space.
			 * Not applicable in our case */
		case SIOCSIFLINK:
			return -EINVAL;

		/*
		 *	Unknown or private ioctl.
		 */
		default:
			if (cmd == SIOCWANDEV ||
			    (cmd >= SIOCDEVPRIVATE &&
			     cmd <= SIOCDEVPRIVATE + 15)) {
				dev_load(ifr.ifr_name);
				rtnl_lock();
				ret = dev_ifsioc(&ifr, cmd);
				rtnl_unlock();
				if (!ret && copy_to_user(arg, &ifr,
							 sizeof(struct ifreq)))
					ret = -EFAULT;
				return ret;
			}
			/* Take care of Wireless Extensions */
3296 3297
			if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
				return wext_handle_ioctl(&ifr, cmd, arg);
L
Linus Torvalds 已提交
3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326
			return -EINVAL;
	}
}


/**
 *	dev_new_index	-	allocate an ifindex
 *
 *	Returns a suitable unique value for a new device interface
 *	number.  The caller must hold the rtnl semaphore or the
 *	dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(void)
{
	static int ifindex;
	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(ifindex))
			return ifindex;
	}
}

static int dev_boot_phase = 1;

/* Delayed registration/unregisteration */
static DEFINE_SPINLOCK(net_todo_list_lock);
static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list);

3327
static void net_set_todo(struct net_device *dev)
L
Linus Torvalds 已提交
3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359
{
	spin_lock(&net_todo_list_lock);
	list_add_tail(&dev->todo_list, &net_todo_list);
	spin_unlock(&net_todo_list_lock);
}

/**
 *	register_netdevice	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	register_netdev() instead of this.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */

int register_netdevice(struct net_device *dev)
{
	struct hlist_head *head;
	struct hlist_node *p;
	int ret;

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

3360 3361
	might_sleep();

L
Linus Torvalds 已提交
3362 3363 3364 3365
	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);

	spin_lock_init(&dev->queue_lock);
H
Herbert Xu 已提交
3366
	spin_lock_init(&dev->_xmit_lock);
3367
	netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
L
Linus Torvalds 已提交
3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378
	dev->xmit_lock_owner = -1;
	spin_lock_init(&dev->ingress_lock);

	dev->iflink = -1;

	/* Init, if this function is available */
	if (dev->init) {
		ret = dev->init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
3379
			goto out;
L
Linus Torvalds 已提交
3380 3381
		}
	}
3382

L
Linus Torvalds 已提交
3383 3384
	if (!dev_valid_name(dev->name)) {
		ret = -EINVAL;
3385
		goto err_uninit;
L
Linus Torvalds 已提交
3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398
	}

	dev->ifindex = dev_new_index();
	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;

	/* Check for existence of name */
	head = dev_name_hash(dev->name);
	hlist_for_each(p, head) {
		struct net_device *d
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
			ret = -EEXIST;
3399
			goto err_uninit;
L
Linus Torvalds 已提交
3400
		}
3401
	}
L
Linus Torvalds 已提交
3402

3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418
	/* Fix illegal checksum combinations */
	if ((dev->features & NETIF_F_HW_CSUM) &&
	    (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
		       dev->name);
		dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	if ((dev->features & NETIF_F_NO_CSUM) &&
	    (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
		       dev->name);
		dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
	}


L
Linus Torvalds 已提交
3419 3420
	/* Fix illegal SG+CSUM combinations. */
	if ((dev->features & NETIF_F_SG) &&
3421
	    !(dev->features & NETIF_F_ALL_CSUM)) {
3422
		printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
L
Linus Torvalds 已提交
3423 3424 3425 3426 3427 3428 3429
		       dev->name);
		dev->features &= ~NETIF_F_SG;
	}

	/* TSO requires that SG is present as well. */
	if ((dev->features & NETIF_F_TSO) &&
	    !(dev->features & NETIF_F_SG)) {
3430
		printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
L
Linus Torvalds 已提交
3431 3432 3433
		       dev->name);
		dev->features &= ~NETIF_F_TSO;
	}
3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447
	if (dev->features & NETIF_F_UFO) {
		if (!(dev->features & NETIF_F_HW_CSUM)) {
			printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
					"NETIF_F_HW_CSUM feature.\n",
							dev->name);
			dev->features &= ~NETIF_F_UFO;
		}
		if (!(dev->features & NETIF_F_SG)) {
			printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
					"NETIF_F_SG feature.\n",
					dev->name);
			dev->features &= ~NETIF_F_UFO;
		}
	}
L
Linus Torvalds 已提交
3448 3449 3450 3451 3452 3453 3454 3455 3456

	/*
	 *	nil rebuild_header routine,
	 *	that should be never called and used as just bug trap.
	 */

	if (!dev->rebuild_header)
		dev->rebuild_header = default_rebuild_header;

3457 3458
	ret = netdev_register_sysfs(dev);
	if (ret)
3459
		goto err_uninit;
3460 3461
	dev->reg_state = NETREG_REGISTERED;

L
Linus Torvalds 已提交
3462 3463 3464 3465 3466 3467 3468 3469 3470
	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	dev_init_scheduler(dev);
	write_lock_bh(&dev_base_lock);
3471
	list_add_tail(&dev->dev_list, &dev_base_head);
L
Linus Torvalds 已提交
3472 3473 3474 3475 3476 3477
	hlist_add_head(&dev->name_hlist, head);
	hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex));
	dev_hold(dev);
	write_unlock_bh(&dev_base_lock);

	/* Notify protocols, that a new device appeared. */
3478 3479 3480 3481
	ret = raw_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		unregister_netdevice(dev);
L
Linus Torvalds 已提交
3482 3483 3484

out:
	return ret;
3485 3486 3487 3488 3489

err_uninit:
	if (dev->uninit)
		dev->uninit(dev);
	goto out;
L
Linus Torvalds 已提交
3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500
}

/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
3501
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
L
Linus Torvalds 已提交
3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519
 *	and expands the device name if you passed a format string to
 *	alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();

	/*
	 * If the name is a format string the caller wants us to do a
	 * name allocation.
	 */
	if (strchr(dev->name, '%')) {
		err = dev_alloc_name(dev, dev->name);
		if (err < 0)
			goto out;
	}
3520

L
Linus Torvalds 已提交
3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536
	err = register_netdevice(dev);
out:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);

/*
 * netdev_wait_allrefs - wait until all references are gone.
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
3537
 * call dev_put.
L
Linus Torvalds 已提交
3538 3539 3540 3541 3542 3543 3544 3545
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;

	rebroadcast_time = warning_time = jiffies;
	while (atomic_read(&dev->refcnt) != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
3546
			rtnl_lock();
L
Linus Torvalds 已提交
3547 3548

			/* Rebroadcast unregister notification */
3549
			raw_notifier_call_chain(&netdev_chain,
L
Linus Torvalds 已提交
3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562
					    NETDEV_UNREGISTER, dev);

			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

3563
			__rtnl_unlock();
L
Linus Torvalds 已提交
3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595

			rebroadcast_time = jiffies;
		}

		msleep(250);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			printk(KERN_EMERG "unregister_netdevice: "
			       "waiting for %s to become free. Usage "
			       "count = %d\n",
			       dev->name, atomic_read(&dev->refcnt));
			warning_time = jiffies;
		}
	}
}

/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *      ...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock() after it drops the semaphore.
 * This allows us to deal with problems:
3596
 * 1) We can delete sysfs objects which invoke hotplug
L
Linus Torvalds 已提交
3597 3598 3599 3600
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 */
A
Arjan van de Ven 已提交
3601
static DEFINE_MUTEX(net_todo_run_mutex);
L
Linus Torvalds 已提交
3602 3603
void netdev_run_todo(void)
{
3604
	struct list_head list;
L
Linus Torvalds 已提交
3605 3606

	/* Need to guard against multiple cpu's getting out of order. */
A
Arjan van de Ven 已提交
3607
	mutex_lock(&net_todo_run_mutex);
L
Linus Torvalds 已提交
3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618

	/* Not safe to do outside the semaphore.  We must not return
	 * until all unregister events invoked by the local processor
	 * have been completed (either by this todo run, or one on
	 * another cpu).
	 */
	if (list_empty(&net_todo_list))
		goto out;

	/* Snapshot list, allow later requests */
	spin_lock(&net_todo_list_lock);
3619
	list_replace_init(&net_todo_list, &list);
L
Linus Torvalds 已提交
3620
	spin_unlock(&net_todo_list_lock);
3621

L
Linus Torvalds 已提交
3622 3623 3624 3625 3626
	while (!list_empty(&list)) {
		struct net_device *dev
			= list_entry(list.next, struct net_device, todo_list);
		list_del(&dev->todo_list);

3627 3628 3629 3630 3631 3632
		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			printk(KERN_ERR "network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}
L
Linus Torvalds 已提交
3633

3634
		dev->reg_state = NETREG_UNREGISTERED;
L
Linus Torvalds 已提交
3635

3636
		netdev_wait_allrefs(dev);
L
Linus Torvalds 已提交
3637

3638 3639 3640 3641 3642
		/* paranoia */
		BUG_ON(atomic_read(&dev->refcnt));
		BUG_TRAP(!dev->ip_ptr);
		BUG_TRAP(!dev->ip6_ptr);
		BUG_TRAP(!dev->dn_ptr);
L
Linus Torvalds 已提交
3643

3644 3645
		if (dev->destructor)
			dev->destructor(dev);
3646 3647 3648

		/* Free network device */
		kobject_put(&dev->dev.kobj);
L
Linus Torvalds 已提交
3649 3650 3651
	}

out:
A
Arjan van de Ven 已提交
3652
	mutex_unlock(&net_todo_run_mutex);
L
Linus Torvalds 已提交
3653 3654
}

3655
static struct net_device_stats *internal_stats(struct net_device *dev)
R
Rusty Russell 已提交
3656
{
3657
	return &dev->stats;
R
Rusty Russell 已提交
3658 3659
}

L
Linus Torvalds 已提交
3660
/**
3661
 *	alloc_netdev_mq - allocate network device
L
Linus Torvalds 已提交
3662 3663 3664
 *	@sizeof_priv:	size of private data to allocate space for
 *	@name:		device name format string
 *	@setup:		callback to initialize device
3665
 *	@queue_count:	the number of subqueues to allocate
L
Linus Torvalds 已提交
3666 3667
 *
 *	Allocates a struct net_device with private data area for driver use
3668 3669
 *	and performs basic initialization.  Also allocates subquue structs
 *	for each queue on the device at the end of the netdevice.
L
Linus Torvalds 已提交
3670
 */
3671 3672
struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *), unsigned int queue_count)
L
Linus Torvalds 已提交
3673 3674 3675 3676 3677
{
	void *p;
	struct net_device *dev;
	int alloc_size;

3678 3679
	BUG_ON(strlen(name) >= sizeof(dev->name));

L
Linus Torvalds 已提交
3680
	/* ensure 32-byte alignment of both the device and private area */
3681
	alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST +
3682
		     (sizeof(struct net_device_subqueue) * (queue_count - 1))) &
3683
		     ~NETDEV_ALIGN_CONST;
L
Linus Torvalds 已提交
3684 3685
	alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;

3686
	p = kzalloc(alloc_size, GFP_KERNEL);
L
Linus Torvalds 已提交
3687
	if (!p) {
3688
		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
L
Linus Torvalds 已提交
3689 3690 3691 3692 3693 3694 3695
		return NULL;
	}

	dev = (struct net_device *)
		(((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
	dev->padded = (char *)dev - (char *)p;

3696 3697 3698 3699
	if (sizeof_priv) {
		dev->priv = ((char *)dev +
			     ((sizeof(struct net_device) +
			       (sizeof(struct net_device_subqueue) *
3700
				(queue_count - 1)) + NETDEV_ALIGN_CONST)
3701 3702 3703 3704
			      & ~NETDEV_ALIGN_CONST));
	}

	dev->egress_subqueue_count = queue_count;
L
Linus Torvalds 已提交
3705

3706
	dev->get_stats = internal_stats;
L
Linus Torvalds 已提交
3707 3708 3709 3710
	setup(dev);
	strcpy(dev->name, name);
	return dev;
}
3711
EXPORT_SYMBOL(alloc_netdev_mq);
L
Linus Torvalds 已提交
3712 3713 3714 3715 3716

/**
 *	free_netdev - free network device
 *	@dev: device
 *
3717 3718
 *	This function does the last stage of destroying an allocated device
 * 	interface. The reference to the device object is released.
L
Linus Torvalds 已提交
3719 3720 3721 3722 3723
 *	If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
#ifdef CONFIG_SYSFS
S
Stephen Hemminger 已提交
3724
	/*  Compatibility with error handling in drivers */
L
Linus Torvalds 已提交
3725 3726 3727 3728 3729 3730 3731 3732
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

3733 3734
	/* will free via device release */
	put_device(&dev->dev);
L
Linus Torvalds 已提交
3735 3736 3737 3738
#else
	kfree((char *)dev - dev->padded);
#endif
}
3739

L
Linus Torvalds 已提交
3740
/* Synchronize with packet receive processing. */
3741
void synchronize_net(void)
L
Linus Torvalds 已提交
3742 3743
{
	might_sleep();
3744
	synchronize_rcu();
L
Linus Torvalds 已提交
3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758
}

/**
 *	unregister_netdevice - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables. On success 0 is returned, on a failure
 *	a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.  You may want
 *	unregister_netdev() instead of this.
 */

3759
void unregister_netdevice(struct net_device *dev)
L
Linus Torvalds 已提交
3760 3761 3762 3763 3764 3765 3766 3767
{
	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	/* Some devices call without registering for initialization unwind. */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
				  "was registered\n", dev->name, dev);
3768 3769 3770

		WARN_ON(1);
		return;
L
Linus Torvalds 已提交
3771 3772 3773 3774 3775 3776 3777 3778 3779
	}

	BUG_ON(dev->reg_state != NETREG_REGISTERED);

	/* If device is running, close it first. */
	if (dev->flags & IFF_UP)
		dev_close(dev);

	/* And unlink it from device chain. */
3780 3781 3782 3783 3784
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
L
Linus Torvalds 已提交
3785 3786 3787 3788 3789 3790 3791 3792

	dev->reg_state = NETREG_UNREGISTERING;

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

3793

L
Linus Torvalds 已提交
3794 3795 3796
	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.
	*/
3797
	raw_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
3798

L
Linus Torvalds 已提交
3799
	/*
3800
	 *	Flush the unicast and multicast chains
L
Linus Torvalds 已提交
3801
	 */
3802
	dev_addr_discard(dev);
L
Linus Torvalds 已提交
3803 3804 3805 3806 3807 3808 3809

	if (dev->uninit)
		dev->uninit(dev);

	/* Notifier chain MUST detach us from master device. */
	BUG_TRAP(!dev->master);

3810 3811 3812
	/* Remove entries from sysfs */
	netdev_unregister_sysfs(dev);

L
Linus Torvalds 已提交
3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851
	/* Finish processing unregister after unlock */
	net_set_todo(dev);

	synchronize_net();

	dev_put(dev);
}

/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables. On success 0 is returned, on a failure
 *	a negative errno code is returned.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}

EXPORT_SYMBOL(unregister_netdev);

static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct net_device **list_net;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

3852
	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
L
Linus Torvalds 已提交
3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Find end of our output_queue. */
	list_net = &sd->output_queue;
	while (*list_net)
		list_net = &(*list_net)->next_sched;
	/* Append output queue from offline CPU. */
	*list_net = oldsd->output_queue;
	oldsd->output_queue = NULL;

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
		netif_rx(skb);

	return NOTIFY_OK;
}

3886 3887
#ifdef CONFIG_NET_DMA
/**
R
Randy Dunlap 已提交
3888 3889 3890 3891 3892
 * net_dma_rebalance - try to maintain one DMA channel per CPU
 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
 *
 * This is called when the number of channels allocated to the net_dma client
 * changes.  The net_dma client tries to have one DMA channel per CPU.
3893
 */
3894 3895

static void net_dma_rebalance(struct net_dma *net_dma)
3896
{
3897
	unsigned int cpu, i, n, chan_idx;
3898 3899
	struct dma_chan *chan;

3900
	if (cpus_empty(net_dma->channel_mask)) {
3901
		for_each_online_cpu(cpu)
A
Alexey Dobriyan 已提交
3902
			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
3903 3904 3905 3906 3907 3908
		return;
	}

	i = 0;
	cpu = first_cpu(cpu_online_map);

3909 3910 3911 3912 3913 3914
	for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
		chan = net_dma->channels[chan_idx];

		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
		   + (i < (num_online_cpus() %
			cpus_weight(net_dma->channel_mask)) ? 1 : 0));
3915 3916

		while(n) {
A
Alexey Dobriyan 已提交
3917
			per_cpu(softnet_data, cpu).net_dma = chan;
3918 3919 3920 3921 3922 3923 3924 3925 3926 3927
			cpu = next_cpu(cpu, cpu_online_map);
			n--;
		}
		i++;
	}
}

/**
 * netdev_dma_event - event callback for the net_dma_client
 * @client: should always be net_dma_client
R
Randy Dunlap 已提交
3928
 * @chan: DMA channel for the event
R
Randy Dunlap 已提交
3929
 * @state: DMA state to be handled
3930
 */
3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955
static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state)
{
	int i, found = 0, pos = -1;
	struct net_dma *net_dma =
		container_of(client, struct net_dma, client);
	enum dma_state_client ack = DMA_DUP; /* default: take no action */

	spin_lock(&net_dma->lock);
	switch (state) {
	case DMA_RESOURCE_AVAILABLE:
		for (i = 0; i < NR_CPUS; i++)
			if (net_dma->channels[i] == chan) {
				found = 1;
				break;
			} else if (net_dma->channels[i] == NULL && pos < 0)
				pos = i;

		if (!found && pos >= 0) {
			ack = DMA_ACK;
			net_dma->channels[pos] = chan;
			cpu_set(pos, net_dma->channel_mask);
			net_dma_rebalance(net_dma);
		}
3956 3957
		break;
	case DMA_RESOURCE_REMOVED:
3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970
		for (i = 0; i < NR_CPUS; i++)
			if (net_dma->channels[i] == chan) {
				found = 1;
				pos = i;
				break;
			}

		if (found) {
			ack = DMA_ACK;
			cpu_clear(pos, net_dma->channel_mask);
			net_dma->channels[i] = NULL;
			net_dma_rebalance(net_dma);
		}
3971 3972 3973 3974
		break;
	default:
		break;
	}
3975 3976 3977
	spin_unlock(&net_dma->lock);

	return ack;
3978 3979 3980 3981 3982 3983 3984
}

/**
 * netdev_dma_regiser - register the networking subsystem as a DMA client
 */
static int __init netdev_dma_register(void)
{
3985 3986 3987 3988
	spin_lock_init(&net_dma.lock);
	dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
	dma_async_client_register(&net_dma.client);
	dma_async_client_chan_request(&net_dma.client);
3989 3990 3991 3992 3993 3994
	return 0;
}

#else
static int __init netdev_dma_register(void) { return -ENODEV; }
#endif /* CONFIG_NET_DMA */
L
Linus Torvalds 已提交
3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019

/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *       This is called single threaded during boot, so no need
 *       to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_sysfs_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
4020
	for (i = 0; i < 16; i++)
L
Linus Torvalds 已提交
4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032
		INIT_LIST_HEAD(&ptype_base[i]);

	for (i = 0; i < ARRAY_SIZE(dev_name_head); i++)
		INIT_HLIST_HEAD(&dev_name_head[i]);

	for (i = 0; i < ARRAY_SIZE(dev_index_head); i++)
		INIT_HLIST_HEAD(&dev_index_head[i]);

	/*
	 *	Initialise the packet receive queues.
	 */

4033
	for_each_possible_cpu(i) {
L
Linus Torvalds 已提交
4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);
		set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
		queue->backlog_dev.weight = weight_p;
		queue->backlog_dev.poll = process_backlog;
		atomic_set(&queue->backlog_dev.refcnt, 1);
	}

4046 4047
	netdev_dma_register();

L
Linus Torvalds 已提交
4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065
	dev_boot_phase = 0;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

EXPORT_SYMBOL(__dev_get_by_index);
EXPORT_SYMBOL(__dev_get_by_name);
EXPORT_SYMBOL(__dev_remove_pack);
4066
EXPORT_SYMBOL(dev_valid_name);
L
Linus Torvalds 已提交
4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108
EXPORT_SYMBOL(dev_add_pack);
EXPORT_SYMBOL(dev_alloc_name);
EXPORT_SYMBOL(dev_close);
EXPORT_SYMBOL(dev_get_by_flags);
EXPORT_SYMBOL(dev_get_by_index);
EXPORT_SYMBOL(dev_get_by_name);
EXPORT_SYMBOL(dev_open);
EXPORT_SYMBOL(dev_queue_xmit);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
EXPORT_SYMBOL(dev_change_flags);
EXPORT_SYMBOL(dev_set_mtu);
EXPORT_SYMBOL(dev_set_mac_address);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
EXPORT_SYMBOL(netdev_state_change);
EXPORT_SYMBOL(netif_receive_skb);
EXPORT_SYMBOL(netif_rx);
EXPORT_SYMBOL(register_gifconf);
EXPORT_SYMBOL(register_netdevice);
EXPORT_SYMBOL(register_netdevice_notifier);
EXPORT_SYMBOL(skb_checksum_help);
EXPORT_SYMBOL(synchronize_net);
EXPORT_SYMBOL(unregister_netdevice);
EXPORT_SYMBOL(unregister_netdevice_notifier);
EXPORT_SYMBOL(net_enable_timestamp);
EXPORT_SYMBOL(net_disable_timestamp);
EXPORT_SYMBOL(dev_get_flags);

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
EXPORT_SYMBOL(br_handle_frame_hook);
EXPORT_SYMBOL(br_fdb_get_hook);
EXPORT_SYMBOL(br_fdb_put_hook);
#endif

#ifdef CONFIG_KMOD
EXPORT_SYMBOL(dev_load);
#endif

EXPORT_PER_CPU_SYMBOL(softnet_data);