/*
 * originally based on the dummy device.
 *
 * Copyright 1999, Thomas Davis, tadavis@lbl.gov.
 * Licensed under the GPL. Based on dummy.c, and eql.c devices.
 *
 * bonding.c: an Ethernet Bonding driver
 *
 * This is useful for talking to Cisco EtherChannel compatible equipment:
 *	Cisco 5500
 *	Sun Trunking (Solaris)
 *	Alteon AceDirector Trunks
 *	Linux Bonding
 *	and probably many L2 switches ...
 *
 * How it works:
 *    ifconfig bond0 ipaddress netmask up
 *      will set up a network device with an ip address.  No mac address
 *	will be assigned at this time.  The hw mac address will come from
 *	the first slave bonded to the channel.  All slaves will then use
 *	this hw mac address.
 *
 *    ifconfig bond0 down
 *         will release all slaves, marking them as down.
 *
 *    ifenslave bond0 eth0
 *	will attach eth0 to bond0 as a slave.  eth0 hw mac address will either
 *	a: be used as initial mac address
 *	b: if a hw mac address already is there, eth0's hw mac address
 *	   will then be set from bond0.
 *
 */
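/* A roughly equivalent setup with modern iproute2 tooling (interface names
 * here are examples only):
 *
 *    ip link add bond0 type bond mode active-backup miimon 100
 *    ip link set eth0 down
 *    ip link set eth0 master bond0
 *    ip link set bond0 up
 */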

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <net/ip.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/socket.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/smp.h>
#include <linux/if_ether.h>
#include <net/arp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_bonding.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>
#include <net/route.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_dissector.h>
#include <net/switchdev.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/bond_alb.h>

#include "bonding_priv.h"

/*---------------------------- Module parameters ----------------------------*/

/* monitor all links that often (in milliseconds). <=0 disables monitoring */

static int max_bonds	= BOND_DEFAULT_MAX_BONDS;
static int tx_queues	= BOND_DEFAULT_TX_QUEUES;
static int num_peer_notif = 1;
static int miimon;
static int updelay;
static int downdelay;
static int use_carrier	= 1;
static char *mode;
static char *primary;
static char *primary_reselect;
static char *lacp_rate;
static int min_links;
static char *ad_select;
static char *xmit_hash_policy;
static int arp_interval;
static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
static char *arp_validate;
static char *arp_all_targets;
static char *fail_over_mac;
static int all_slaves_active;
static struct bond_params bonding_defaults;
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
static int packets_per_slave = 1;
static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;

module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
module_param(tx_queues, int, 0);
MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
module_param_named(num_grat_arp, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
			       "failover event (alias of num_unsol_na)");
module_param_named(num_unsol_na, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
			       "failover event (alias of num_grat_arp)");
module_param(miimon, int, 0);
MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
module_param(updelay, int, 0);
MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
module_param(downdelay, int, 0);
MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
			    "in milliseconds");
module_param(use_carrier, int, 0);
MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
			      "0 for off, 1 for on (default)");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
		       "1 for active-backup, 2 for balance-xor, "
		       "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
		       "6 for balance-alb");
module_param(primary, charp, 0);
MODULE_PARM_DESC(primary, "Primary network device to use");
module_param(primary_reselect, charp, 0);
MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
				   "once it comes up; "
				   "0 for always (default), "
				   "1 for only if speed of primary is "
				   "better, "
				   "2 for only on active slave "
				   "failure");
module_param(lacp_rate, charp, 0);
MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
			    "0 for slow, 1 for fast");
module_param(ad_select, charp, 0);
MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
			    "0 for stable (default), 1 for bandwidth, "
			    "2 for count");
module_param(min_links, int, 0);
MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");

module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
				   "0 for layer 2 (default), 1 for layer 3+4, "
				   "2 for layer 2+3, 3 for encap layer 2+3, "
				   "4 for encap layer 3+4");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
module_param(arp_validate, charp, 0);
MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
			       "0 for none (default), 1 for active, "
			       "2 for backup, 3 for all");
module_param(arp_all_targets, charp, 0);
MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
module_param(fail_over_mac, charp, 0);
MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
				"the same MAC; 0 for none (default), "
				"1 for active, 2 for follow");
module_param(all_slaves_active, int, 0);
MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
				     "by setting active flag for all slaves; "
				     "0 for never (default), 1 for always.");
module_param(resend_igmp, int, 0);
MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
			      "link failure");
module_param(packets_per_slave, int, 0);
MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
				    "mode; 0 for a random slave, 1 packet per "
				    "slave (default), >1 packets per slave.");
module_param(lp_interval, uint, 0);
MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
			      "the bonding driver sends learning packets to "
			      "each slaves peer switch. The default is 1.");
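/* The parameters above map directly to module options; for example (a
 * hypothetical invocation, adjust values as needed):
 *
 *    modprobe bonding mode=802.3ad miimon=100 lacp_rate=fast max_bonds=1
 *
 * Values given here only seed bonding_defaults; per-bond settings made later
 * through sysfs or netlink take precedence for that bond.
 */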

/*----------------------------- Global variables ----------------------------*/

#ifdef CONFIG_NET_POLL_CONTROLLER
atomic_t netpoll_block_tx = ATOMIC_INIT(0);
#endif

unsigned int bond_net_id __read_mostly;

/*-------------------------- Forward declarations ---------------------------*/

static int bond_init(struct net_device *bond_dev);
static void bond_uninit(struct net_device *bond_dev);
static void bond_get_stats(struct net_device *bond_dev,
			   struct rtnl_link_stats64 *stats);
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
				  int mod);
static void bond_netdev_notify_work(struct work_struct *work);

/*---------------------------- General routines -----------------------------*/

const char *bond_mode_name(int mode)
{
	static const char *names[] = {
		[BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
		[BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
		[BOND_MODE_XOR] = "load balancing (xor)",
		[BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
		[BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
		[BOND_MODE_TLB] = "transmit load balancing",
		[BOND_MODE_ALB] = "adaptive load balancing",
	};

	if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
		return "unknown";

	return names[mode];
}

/*---------------------------------- VLAN -----------------------------------*/

/**
 * bond_dev_queue_xmit - Prepare skb for xmit.
 *
 * @bond: bond device that got this skb for tx.
 * @skb: hw accel VLAN tagged skb to transmit
 * @slave_dev: slave that is supposed to xmit this skbuff
 */
void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
			struct net_device *slave_dev)
{
	skb->dev = slave_dev;

	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	if (unlikely(netpoll_tx_running(bond->dev)))
		bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
	else
		dev_queue_xmit(skb);
}

/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
 * we don't protect the slave list iteration with a lock because:
 * a. This operation is performed in IOCTL context,
 * b. The operation is protected by the RTNL semaphore in the 8021q code,
 * c. Holding a lock with BH disabled while directly calling a base driver
 *    entry point is generally a BAD idea.
 *
 * The design of synchronization/protection for this operation in the 8021q
 * module is good for one or more VLAN devices over a single physical device
 * and cannot be extended for a teaming solution like bonding, so there is a
 * potential race condition here where a net device from the vlan group might
 * be referenced (either by a base driver or the 8021q code) while it is being
 * removed from the system. However, it turns out we're not making matters
 * worse, and if it works for regular VLAN usage it will work here too.
*/

/**
 * bond_vlan_rx_add_vid - Propagates adding an id to slaves
 * @bond_dev: bonding net device that got called
 * @vid: vlan id being added
 */
static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
				__be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *rollback_slave;
	struct list_head *iter;
	int res;

	bond_for_each_slave(bond, slave, iter) {
		res = vlan_vid_add(slave->dev, proto, vid);
		if (res)
			goto unwind;
	}

	return 0;

unwind:
	/* unwind to the slave that failed */
	bond_for_each_slave(bond, rollback_slave, iter) {
		if (rollback_slave == slave)
			break;

		vlan_vid_del(rollback_slave->dev, proto, vid);
	}

	return res;
}

/**
 * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
 * @bond_dev: bonding net device that got called
 * @vid: vlan id being removed
 */
static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
				 __be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		vlan_vid_del(slave->dev, proto, vid);

	if (bond_is_lb(bond))
		bond_alb_clear_vlan(bond, vid);

	return 0;
}

/*------------------------------- Link status -------------------------------*/

/* Set the carrier state for the master according to the state of its
 * slaves.  If any slaves are up, the master is up.  In 802.3ad mode,
 * do special 802.3ad magic.
 *
 * Returns zero if carrier state does not change, nonzero if it does.
 */
int bond_set_carrier(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave;

	if (!bond_has_slaves(bond))
		goto down;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		return bond_3ad_set_carrier(bond);

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP) {
			if (!netif_carrier_ok(bond->dev)) {
				netif_carrier_on(bond->dev);
				return 1;
			}
			return 0;
		}
	}

down:
	if (netif_carrier_ok(bond->dev)) {
		netif_carrier_off(bond->dev);
		return 1;
	}
	return 0;
}

/* Get link speed and duplex from the slave's base driver
 * using ethtool. If for some reason the call fails or the
 * values are invalid, set speed and duplex to -1,
 * and return. Return 1 if speed or duplex settings are
 * UNKNOWN; 0 otherwise.
 */
static int bond_update_speed_duplex(struct slave *slave)
{
	struct net_device *slave_dev = slave->dev;
	struct ethtool_link_ksettings ecmd;
	int res;

	slave->speed = SPEED_UNKNOWN;
	slave->duplex = DUPLEX_UNKNOWN;

	res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
	if (res < 0)
		return 1;
	if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
		return 1;
	switch (ecmd.base.duplex) {
	case DUPLEX_FULL:
	case DUPLEX_HALF:
		break;
	default:
		return 1;
	}

	slave->speed = ecmd.base.speed;
	slave->duplex = ecmd.base.duplex;

	return 0;
}

const char *bond_slave_link_status(s8 link)
{
	switch (link) {
	case BOND_LINK_UP:
		return "up";
	case BOND_LINK_FAIL:
		return "going down";
	case BOND_LINK_DOWN:
		return "down";
	case BOND_LINK_BACK:
		return "going back";
	default:
		return "unknown";
	}
}

/* if <dev> supports MII link status reporting, check its link status.
 *
 * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
 * depending upon the setting of the use_carrier parameter.
 *
 * Return either BMSR_LSTATUS, meaning that the link is up (or we
 * can't tell and just pretend it is), or 0, meaning that the link is
 * down.
 *
 * If reporting is non-zero, instead of faking link up, return -1 if
 * both ETHTOOL and MII ioctls fail (meaning the device does not
 * support them).  If use_carrier is set, return whatever it says.
 * It'd be nice if there was a good way to tell if a driver supports
 * netif_carrier, but there really isn't.
 */
static int bond_check_dev_link(struct bonding *bond,
			       struct net_device *slave_dev, int reporting)
{
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	int (*ioctl)(struct net_device *, struct ifreq *, int);
	struct ifreq ifr;
	struct mii_ioctl_data *mii;

	if (!reporting && !netif_running(slave_dev))
		return 0;

	if (bond->params.use_carrier)
		return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;

	/* Try to get link status using Ethtool first. */
	if (slave_dev->ethtool_ops->get_link)
		return slave_dev->ethtool_ops->get_link(slave_dev) ?
			BMSR_LSTATUS : 0;

	/* Ethtool can't be used, fallback to MII ioctls. */
	ioctl = slave_ops->ndo_do_ioctl;
	if (ioctl) {
		/* TODO: set pointer to correct ioctl on a per team member
		 *       basis to make this more efficient. that is, once
		 *       we determine the correct ioctl, we will always
		 *       call it and not the others for that team
		 *       member.
		 */

		/* We cannot assume that SIOCGMIIPHY will also read a
		 * register; not all network drivers (e.g., e100)
		 * support that.
		 */

		/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
		strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
		mii = if_mii(&ifr);
		if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
			mii->reg_num = MII_BMSR;
			if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
				return mii->val_out & BMSR_LSTATUS;
		}
	}

	/* If reporting, report that either there's no dev->do_ioctl,
	 * or both SIOCGMIIREG and get_link failed (meaning that we
	 * cannot report link status).  If not reporting, pretend
	 * we're ok.
	 */
	return reporting ? -1 : BMSR_LSTATUS;
}

/*----------------------------- Multicast list ------------------------------*/

/* Push the promiscuity flag down to appropriate slaves */
static int bond_set_promiscuity(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_promiscuity(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_promiscuity(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}

/* Push the allmulti flag down to all slaves */
static int bond_set_allmulti(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_allmulti(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_allmulti(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}

/* Retrieve the list of registered multicast addresses for the bonding
 * device and retransmit an IGMP JOIN request to the current active
 * slave.
 */
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mcast_work.work);

	if (!rtnl_trylock()) {
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);

	if (bond->igmp_retrans > 1) {
		bond->igmp_retrans--;
		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
	}
	rtnl_unlock();
}

/* Flush bond's hardware addresses from slave */
static void bond_hw_addr_flush(struct net_device *bond_dev,
			       struct net_device *slave_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	dev_uc_unsync(slave_dev, bond_dev);
	dev_mc_unsync(slave_dev, bond_dev);

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		/* del lacpdu mc addr from mc list */
		u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;

		dev_mc_del(slave_dev, lacpdu_multicast);
	}
}

/*--------------------------- Active slave change ---------------------------*/

/* Update the hardware address list and promisc/allmulti for the new and
 * old active slaves (if any).  Modes that are not using primary keep all
 * slaves up to date at all times; only the modes that use primary need to
 * call this function to swap these settings during a failover.
 */
static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
			      struct slave *old_active)
{
	if (old_active) {
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(old_active->dev, -1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(old_active->dev, -1);

		bond_hw_addr_flush(bond->dev, old_active->dev);
	}

	if (new_active) {
		/* FIXME: Signal errors upstream. */
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(new_active->dev, 1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(new_active->dev, 1);

		netif_addr_lock_bh(bond->dev);
		dev_uc_sync(new_active->dev, bond->dev);
		dev_mc_sync(new_active->dev, bond->dev);
		netif_addr_unlock_bh(bond->dev);
	}
}

/**
 * bond_set_dev_addr - clone slave's address to bond
 * @bond_dev: bond net device
 * @slave_dev: slave net device
 *
 * Should be called with RTNL held.
 */
static int bond_set_dev_addr(struct net_device *bond_dev,
			     struct net_device *slave_dev)
{
	int err;

	netdev_dbg(bond_dev, "bond_dev=%p slave_dev=%p slave_dev->name=%s slave_dev->addr_len=%d\n",
		   bond_dev, slave_dev, slave_dev->name, slave_dev->addr_len);
	err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
	if (err)
		return err;

	memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
	bond_dev->addr_assign_type = NET_ADDR_STOLEN;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
	return 0;
}

static struct slave *bond_get_old_active(struct bonding *bond,
					 struct slave *new_active)
{
	struct slave *slave;
	struct list_head *iter;

	bond_for_each_slave(bond, slave, iter) {
		if (slave == new_active)
			continue;

		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
			return slave;
	}

	return NULL;
}

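/* Note: fail_over_mac=active makes the bond's MAC track the currently active
 * slave, while fail_over_mac=follow swaps MAC addresses between the old and
 * new active slaves so that each slave keeps a unique address.
 */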
/* bond_do_fail_over_mac
 *
 * Perform special MAC address swapping for fail_over_mac settings
 *
 * Called with RTNL
 */
static void bond_do_fail_over_mac(struct bonding *bond,
				  struct slave *new_active,
				  struct slave *old_active)
{
	u8 tmp_mac[MAX_ADDR_LEN];
	struct sockaddr_storage ss;
	int rv;

	switch (bond->params.fail_over_mac) {
	case BOND_FOM_ACTIVE:
		if (new_active) {
			rv = bond_set_dev_addr(bond->dev, new_active->dev);
			if (rv)
				netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
					   -rv, bond->dev->name);
		}
		break;
	case BOND_FOM_FOLLOW:
		/* if new_active && old_active, swap them
		 * if just old_active, do nothing (going to no active slave)
		 * if just new_active, set new_active to bond's MAC
		 */
		if (!new_active)
			return;

		if (!old_active)
			old_active = bond_get_old_active(bond, new_active);

		if (old_active) {
			bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr,
					  new_active->dev->addr_len);
			bond_hw_addr_copy(ss.__data,
					  old_active->dev->dev_addr,
					  old_active->dev->addr_len);
			ss.ss_family = new_active->dev->type;
		} else {
			bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
					  bond->dev->addr_len);
			ss.ss_family = bond->dev->type;
		}

		rv = dev_set_mac_address(new_active->dev,
					 (struct sockaddr *)&ss, NULL);
		if (rv) {
			netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
				   -rv, new_active->dev->name);
			goto out;
		}

		if (!old_active)
			goto out;

		bond_hw_addr_copy(ss.__data, tmp_mac,
				  new_active->dev->addr_len);
		ss.ss_family = old_active->dev->type;

		rv = dev_set_mac_address(old_active->dev,
					 (struct sockaddr *)&ss, NULL);
		if (rv)
			netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
				   -rv, new_active->dev->name);
out:
		break;
	default:
		netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
			   bond->params.fail_over_mac);
		break;
	}

}

static struct slave *bond_choose_primary_or_current(struct bonding *bond)
{
	struct slave *prim = rtnl_dereference(bond->primary_slave);
	struct slave *curr = rtnl_dereference(bond->curr_active_slave);

	if (!prim || prim->link != BOND_LINK_UP) {
		if (!curr || curr->link != BOND_LINK_UP)
			return NULL;
		return curr;
	}

	if (bond->force_primary) {
		bond->force_primary = false;
		return prim;
	}

	if (!curr || curr->link != BOND_LINK_UP)
		return prim;

	/* At this point, prim and curr are both up */
	switch (bond->params.primary_reselect) {
	case BOND_PRI_RESELECT_ALWAYS:
		return prim;
	case BOND_PRI_RESELECT_BETTER:
		if (prim->speed < curr->speed)
			return curr;
		if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
			return curr;
		return prim;
	case BOND_PRI_RESELECT_FAILURE:
		return curr;
	default:
		netdev_err(bond->dev, "impossible primary_reselect %d\n",
			   bond->params.primary_reselect);
		return curr;
	}
}

/**
 * bond_find_best_slave - select the best available slave to be the active one
 * @bond: our bonding struct
 */
static struct slave *bond_find_best_slave(struct bonding *bond)
{
	struct slave *slave, *bestslave = NULL;
	struct list_head *iter;
	int mintime = bond->params.updelay;

	slave = bond_choose_primary_or_current(bond);
	if (slave)
		return slave;

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP)
			return slave;
		if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
		    slave->delay < mintime) {
			mintime = slave->delay;
			bestslave = slave;
		}
	}

	return bestslave;
}

static bool bond_should_notify_peers(struct bonding *bond)
{
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	rcu_read_unlock();

	netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
		   slave ? slave->dev->name : "NULL");

	if (!slave || !bond->send_peer_notif ||
	    !netif_carrier_ok(bond->dev) ||
	    test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
		return false;

	return true;
}

/**
 * bond_change_active_slave - change the active slave into the specified one
 * @bond: our bonding struct
 * @new_active: the new slave to make the active one
 *
 * Set the new slave to the bond's settings and unset them on the old
 * curr_active_slave.
 * Settings include flags, mc-list, promiscuity, allmulti, etc.
 *
 * If @new_active's link state is %BOND_LINK_BACK we'll set it to %BOND_LINK_UP,
 * because it is apparently the best available slave we have, even though its
 * updelay hasn't timed out yet.
 *
 * Caller must hold RTNL.
 */
void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
{
	struct slave *old_active;

	ASSERT_RTNL();

	old_active = rtnl_dereference(bond->curr_active_slave);

	if (old_active == new_active)
		return;

	if (new_active) {
		new_active->last_link_up = jiffies;

		if (new_active->link == BOND_LINK_BACK) {
			if (bond_uses_primary(bond)) {
				netdev_info(bond->dev, "making interface %s the new active one %d ms earlier\n",
					    new_active->dev->name,
					    (bond->params.updelay - new_active->delay) * bond->params.miimon);
			}

			new_active->delay = 0;
			bond_set_slave_link_state(new_active, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);

			if (BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_3ad_handle_link_change(new_active, BOND_LINK_UP);

			if (bond_is_lb(bond))
				bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
		} else {
			if (bond_uses_primary(bond)) {
				netdev_info(bond->dev, "making interface %s the new active one\n",
					    new_active->dev->name);
			}
		}
	}

	if (bond_uses_primary(bond))
		bond_hw_addr_swap(bond, new_active, old_active);

	if (bond_is_lb(bond)) {
		bond_alb_handle_active_change(bond, new_active);
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);
		if (new_active)
			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);
	} else {
		rcu_assign_pointer(bond->curr_active_slave, new_active);
	}

	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);

		if (new_active) {
			bool should_notify_peers = false;

			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);

			if (bond->params.fail_over_mac)
				bond_do_fail_over_mac(bond, new_active,
						      old_active);

			if (netif_running(bond->dev)) {
				bond->send_peer_notif =
					bond->params.num_peer_notif;
				should_notify_peers =
					bond_should_notify_peers(bond);
			}

			call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
			if (should_notify_peers)
				call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
							 bond->dev);
		}
	}

	/* resend IGMP joins since active slave has changed or
	 * all were sent on curr_active_slave.
	 * resend only if bond is brought up with the affected
	 * bonding modes and the retransmission is enabled
	 */
	if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
	    ((bond_uses_primary(bond) && new_active) ||
	     BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
		bond->igmp_retrans = bond->params.resend_igmp;
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
	}

/**
 * bond_select_active_slave - select a new active slave, if needed
 * @bond: our bonding struct
 *
S
 * This function should be called when one of the following occurs:
 * - The old curr_active_slave has been released or lost its link.
 * - The primary_slave has got its link back.
 * - A slave has got its link back and there's no old curr_active_slave.
 *
 * Caller must hold RTNL.
 */
void bond_select_active_slave(struct bonding *bond)
{
	struct slave *best_slave;
	int rv;

	ASSERT_RTNL();

	best_slave = bond_find_best_slave(bond);
	if (best_slave != rtnl_dereference(bond->curr_active_slave)) {
		bond_change_active_slave(bond, best_slave);
		rv = bond_set_carrier(bond);
		if (!rv)
			return;

		if (netif_carrier_ok(bond->dev))
			netdev_info(bond->dev, "first active interface up!\n");
		else
			netdev_info(bond->dev, "now running without any active interface!\n");
}

949
#ifdef CONFIG_NET_POLL_CONTROLLER
950
static inline int slave_enable_netpoll(struct slave *slave)
951
{
952 953
	struct netpoll *np;
	int err = 0;
954

955
	np = kzalloc(sizeof(*np), GFP_KERNEL);
956 957 958 959
	err = -ENOMEM;
	if (!np)
		goto out;

960
	err = __netpoll_setup(np, slave->dev);
961 962 963
	if (err) {
		kfree(np);
		goto out;
964
	}
965 966 967 968 969 970 971 972 973 974 975 976
	slave->np = np;
out:
	return err;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
	struct netpoll *np = slave->np;

	if (!np)
		return;

	slave->np = NULL;
977 978

	__netpoll_free(np);
979
}
980 981 982

static void bond_poll_controller(struct net_device *bond_dev)
{
983 984 985 986 987 988 989 990 991 992
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave = NULL;
	struct list_head *iter;
	struct ad_info ad_info;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		if (bond_3ad_get_active_agg_info(bond, &ad_info))
			return;

	bond_for_each_slave_rcu(bond, slave, iter) {
993
		if (!bond_slave_is_up(slave))
994 995 996 997 998 999 1000 1001 1002 1003 1004
			continue;

		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			struct aggregator *agg =
			    SLAVE_AD_INFO(slave)->port.aggregator;

			if (agg &&
			    agg->aggregator_identifier != ad_info.aggregator_id)
				continue;
		}

1005
		netpoll_poll_dev(slave->dev);
1006
	}
1007 1008
}

static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		if (bond_slave_is_up(slave))
			slave_disable_netpoll(slave);
}

static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	struct slave *slave;
	int err = 0;

	bond_for_each_slave(bond, slave, iter) {
		err = slave_enable_netpoll(slave);
		if (err) {
			bond_netpoll_cleanup(dev);
			break;
		}
	}
	return err;
}
#else
static inline int slave_enable_netpoll(struct slave *slave)
{
	return 0;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
}
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
}
#endif

/*---------------------------------- IOCTL ----------------------------------*/

static netdev_features_t bond_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	netdev_features_t mask;
	struct slave *slave;

	mask = features;

	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		features = netdev_increment_features(features,
						     slave->dev->features,
						     mask);
	}
	features = netdev_add_tso_features(features, mask);

	return features;
}

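/* Feature flags that are allowed to propagate from the slaves into the bond
 * device's vlan_features and hw_enc_features respectively; used by
 * bond_compute_features() below.
 */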
#define BOND_VLAN_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
				 NETIF_F_HIGHDMA | NETIF_F_LRO)

#define BOND_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)

static void bond_compute_features(struct bonding *bond)
{
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;
	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
	netdev_features_t enc_features  = BOND_ENC_FEATURES;
	struct net_device *bond_dev = bond->dev;
	struct list_head *iter;
	struct slave *slave;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int gso_max_size = GSO_MAX_SIZE;
	u16 gso_max_segs = GSO_MAX_SEGS;

	if (!bond_has_slaves(bond))
		goto done;
	vlan_features &= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		vlan_features = netdev_increment_features(vlan_features,
			slave->dev->vlan_features, BOND_VLAN_FEATURES);

		enc_features = netdev_increment_features(enc_features,
							 slave->dev->hw_enc_features,
							 BOND_ENC_FEATURES);
		dst_release_flag &= slave->dev->priv_flags;
		if (slave->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = slave->dev->hard_header_len;

		gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
		gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
	}
	bond_dev->hard_header_len = max_hard_header_len;

done:
	bond_dev->vlan_features = vlan_features;
	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
				    NETIF_F_GSO_UDP_L4;
	bond_dev->gso_max_segs = gso_max_segs;
	netif_set_gso_max_size(bond_dev, gso_max_size);

	bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
	    dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;

	netdev_change_features(bond_dev);
}

static void bond_setup_by_slave(struct net_device *bond_dev,
				struct net_device *slave_dev)
{
	bond_dev->header_ops	    = slave_dev->header_ops;

	bond_dev->type		    = slave_dev->type;
	bond_dev->hard_header_len   = slave_dev->hard_header_len;
	bond_dev->addr_len	    = slave_dev->addr_len;

	memcpy(bond_dev->broadcast, slave_dev->broadcast,
		slave_dev->addr_len);
}

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for alb non-mcast/bcast.
 */
static bool bond_should_deliver_exact_match(struct sk_buff *skb,
					    struct slave *slave,
					    struct bonding *bond)
{
	if (bond_is_slave_inactive(slave)) {
		if (BOND_MODE(bond) == BOND_MODE_ALB &&
		    skb->pkt_type != PACKET_BROADCAST &&
		    skb->pkt_type != PACKET_MULTICAST)
			return false;
		return true;
	}
	return false;
}

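/* rx_handler for traffic arriving on enslaved devices; it is attached to each
 * slave (via netdev_rx_handler_register()) when the slave joins the bond.
 */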
static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct slave *slave;
	struct bonding *bond;
	int (*recv_probe)(const struct sk_buff *, struct bonding *,
			  struct slave *);
	int ret = RX_HANDLER_ANOTHER;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	slave = bond_slave_get_rcu(skb->dev);
	bond = slave->bond;

	recv_probe = READ_ONCE(bond->recv_probe);
	if (recv_probe) {
		ret = recv_probe(skb, bond, slave);
		if (ret == RX_HANDLER_CONSUMED) {
			consume_skb(skb);
			return ret;
		}
	}

	/* Link-local multicast packets should be passed to the
	 * stack on the link they arrive as well as pass them to the
	 * bond-master device. These packets are mostly usable when
	 * stack receives it with the link on which they arrive
	 * (e.g. LLDP) they also must be available on master. Some of
	 * the use cases include (but are not limited to): LLDP agents
	 * that must be able to operate both on enslaved interfaces as
	 * well as on bonds themselves; linux bridges that must be able
	 * to process/pass BPDUs from attached bonds when any kind of
	 * STP version is enabled on the network.
	 */
	if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

		if (nskb) {
			nskb->dev = bond->dev;
			nskb->queue_mapping = 0;
			netif_rx(nskb);
		}
		return RX_HANDLER_PASS;
	}
	if (bond_should_deliver_exact_match(skb, slave, bond))
		return RX_HANDLER_EXACT;

	skb->dev = bond->dev;

	if (BOND_MODE(bond) == BOND_MODE_ALB &&
	    bond->dev->priv_flags & IFF_BRIDGE_PORT &&
	    skb->pkt_type == PACKET_HOST) {

		if (unlikely(skb_cow_head(skb,
					  skb->data - skb_mac_header(skb)))) {
			kfree_skb(skb);
			return RX_HANDLER_CONSUMED;
		}
		bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
				  bond->dev->addr_len);
	}

	return ret;
}

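/* Map the bonding mode to the generic LAG tx type reported to lower devices
 * through the netdev LAG API (e.g. for switchdev offload of the bond).
 */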
static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
		return NETDEV_LAG_TX_TYPE_ROUNDROBIN;
	case BOND_MODE_ACTIVEBACKUP:
		return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP;
	case BOND_MODE_BROADCAST:
		return NETDEV_LAG_TX_TYPE_BROADCAST;
	case BOND_MODE_XOR:
	case BOND_MODE_8023AD:
		return NETDEV_LAG_TX_TYPE_HASH;
	default:
		return NETDEV_LAG_TX_TYPE_UNKNOWN;
	}
}

static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
					       enum netdev_lag_tx_type type)
{
	if (type != NETDEV_LAG_TX_TYPE_HASH)
		return NETDEV_LAG_HASH_NONE;

	switch (bond->params.xmit_policy) {
	case BOND_XMIT_POLICY_LAYER2:
		return NETDEV_LAG_HASH_L2;
	case BOND_XMIT_POLICY_LAYER34:
		return NETDEV_LAG_HASH_L34;
	case BOND_XMIT_POLICY_LAYER23:
		return NETDEV_LAG_HASH_L23;
	case BOND_XMIT_POLICY_ENCAP23:
		return NETDEV_LAG_HASH_E23;
	case BOND_XMIT_POLICY_ENCAP34:
		return NETDEV_LAG_HASH_E34;
	default:
		return NETDEV_LAG_HASH_UNKNOWN;
	}
}

static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
				      struct netlink_ext_ack *extack)
{
	struct netdev_lag_upper_info lag_upper_info;
	enum netdev_lag_tx_type type;

	type = bond_lag_tx_type(bond);
	lag_upper_info.tx_type = type;
	lag_upper_info.hash_type = bond_lag_hash_type(bond, type);

	return netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
					    &lag_upper_info, extack);
}

static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
{
	netdev_upper_dev_unlink(slave->dev, bond->dev);
	slave->dev->flags &= ~IFF_SLAVE;
}

static struct slave *bond_alloc_slave(struct bonding *bond)
{
	struct slave *slave = NULL;

	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	if (!slave)
		return NULL;

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
					       GFP_KERNEL);
		if (!SLAVE_AD_INFO(slave)) {
			kfree(slave);
			return NULL;
		}
	}
	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);

	return slave;
}

static void bond_free_slave(struct slave *slave)
{
	struct bonding *bond = bond_get_bond_by_slave(slave);

	cancel_delayed_work_sync(&slave->notify_work);
	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		kfree(SLAVE_AD_INFO(slave));

	kfree(slave);
}

static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
{
	info->bond_mode = BOND_MODE(bond);
	info->miimon = bond->params.miimon;
	info->num_slaves = bond->slave_cnt;
}

static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
{
	strcpy(info->slave_name, slave->dev->name);
	info->link = slave->link;
	info->state = bond_slave_state(slave);
	info->link_failure_count = slave->link_failure_count;
}

static void bond_netdev_notify_work(struct work_struct *_work)
{
	struct slave *slave = container_of(_work, struct slave,
					   notify_work.work);

	if (rtnl_trylock()) {
		struct netdev_bonding_info binfo;

		bond_fill_ifslave(slave, &binfo.slave);
		bond_fill_ifbond(slave->bond, &binfo.master);
		netdev_bonding_info_change(slave->dev, &binfo);
		rtnl_unlock();
	} else {
		queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
	}
}

void bond_queue_slave_event(struct slave *slave)
{
	queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
}

void bond_lower_state_changed(struct slave *slave)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = slave->link == BOND_LINK_UP ||
		       slave->link == BOND_LINK_FAIL;
	info.tx_enabled = bond_is_active_slave(slave);
	netdev_lower_state_changed(slave->dev, &info);
}

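/* High-level flow (roughly): validate that the slave is usable, align its
 * type/MTU/MAC with the bond, open it, hook it into the receive path and the
 * bond's slave list, and finally recompute bond-wide state.  The err_* labels
 * reached via the gotos below unwind these steps in reverse order.
 */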
/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
		 struct netlink_ext_ack *extack)
{
	struct bonding *bond = netdev_priv(bond_dev);
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	struct slave *new_slave = NULL, *prev_slave;
	struct sockaddr_storage ss;
	int link_reporting;
	int res = 0, i;

	if (!bond->params.use_carrier &&
	    slave_dev->ethtool_ops->get_link == NULL &&
	    slave_ops->ndo_do_ioctl == NULL) {
		netdev_warn(bond_dev, "no link monitoring support for %s\n",
			    slave_dev->name);
	}

	/* already in-use? */
	if (netdev_is_rx_handler_busy(slave_dev)) {
		NL_SET_ERR_MSG(extack, "Device is in use and cannot be enslaved");
		netdev_err(bond_dev,
			   "Error: Device is in use and cannot be enslaved\n");
		return -EBUSY;
	}

	if (bond_dev == slave_dev) {
		NL_SET_ERR_MSG(extack, "Cannot enslave bond to itself.");
		netdev_err(bond_dev, "cannot enslave bond to itself.\n");
		return -EPERM;
	}

	/* vlan challenged mutual exclusion */
	/* no need to lock since we're protected by rtnl_lock */
	if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
		netdev_dbg(bond_dev, "%s is NETIF_F_VLAN_CHALLENGED\n",
			   slave_dev->name);
		if (vlan_uses_dev(bond_dev)) {
			NL_SET_ERR_MSG(extack, "Can not enslave VLAN challenged device to VLAN enabled bond");
			netdev_err(bond_dev, "Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
				   slave_dev->name, bond_dev->name);
			return -EPERM;
		} else {
			netdev_warn(bond_dev, "enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
				    slave_dev->name, slave_dev->name,
				    bond_dev->name);
		}
	} else {
		netdev_dbg(bond_dev, "%s is !NETIF_F_VLAN_CHALLENGED\n",
			   slave_dev->name);
	}

	/* Old ifenslave binaries are no longer supported.  These can
	 * be identified with moderate accuracy by the state of the slave:
	 * the current ifenslave will set the interface down prior to
	 * enslaving it; the old ifenslave will not.
	 */
	if (slave_dev->flags & IFF_UP) {
		NL_SET_ERR_MSG(extack, "Device can not be enslaved while up");
		netdev_err(bond_dev, "%s is up - this may be due to an out of date ifenslave\n",
			   slave_dev->name);
		return -EPERM;
	}

	/* set bonding device ether type by slave - bonding netdevices are
	 * created with ether_setup, so when the slave type is not ARPHRD_ETHER
	 * there is a need to override some of the type dependent attribs/funcs.
	 *
	 * bond ether type mutual exclusion - don't allow slaves of dissimilar
	 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
	 */
	if (!bond_has_slaves(bond)) {
		if (bond_dev->type != slave_dev->type) {
			netdev_dbg(bond_dev, "change device type from %d to %d\n",
				   bond_dev->type, slave_dev->type);

			res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
						       bond_dev);
			res = notifier_to_errno(res);
			if (res) {
				netdev_err(bond_dev, "refused to change device type\n");
				return -EBUSY;
			}

			/* Flush unicast and multicast addresses */
			dev_uc_flush(bond_dev);
			dev_mc_flush(bond_dev);

			if (slave_dev->type != ARPHRD_ETHER)
				bond_setup_by_slave(bond_dev, slave_dev);
			else {
				ether_setup(bond_dev);
				bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
			}

			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
						 bond_dev);
		}
	} else if (bond_dev->type != slave_dev->type) {
		NL_SET_ERR_MSG(extack, "Device type is different from other slaves");
		netdev_err(bond_dev, "%s ether type (%d) is different from other slaves (%d), can not enslave it\n",
			   slave_dev->name, slave_dev->type, bond_dev->type);
		return -EINVAL;
	}

	if (slave_dev->type == ARPHRD_INFINIBAND &&
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		NL_SET_ERR_MSG(extack, "Only active-backup mode is supported for infiniband slaves");
		netdev_warn(bond_dev, "Type (%d) supports only active-backup mode\n",
			    slave_dev->type);
		res = -EOPNOTSUPP;
		goto err_undo_flags;
	}

	if (!slave_ops->ndo_set_mac_address ||
	    slave_dev->type == ARPHRD_INFINIBAND) {
		netdev_warn(bond_dev, "The slave device specified does not support setting the MAC address\n");
		if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
		    bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
			if (!bond_has_slaves(bond)) {
				bond->params.fail_over_mac = BOND_FOM_ACTIVE;
				netdev_warn(bond_dev, "Setting fail_over_mac to active for active-backup mode\n");
			} else {
				NL_SET_ERR_MSG(extack, "Slave device does not support setting the MAC address, but fail_over_mac is not set to active");
				netdev_err(bond_dev, "The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n");
				res = -EOPNOTSUPP;
				goto err_undo_flags;
			}
		}
	}

	call_netdevice_notifiers(NETDEV_JOIN, slave_dev);

	/* If this is the first slave, then we need to set the master's hardware
	 * address to be the same as the slave's.
	 */
	if (!bond_has_slaves(bond) &&
	    bond->dev->addr_assign_type == NET_ADDR_RANDOM) {
		res = bond_set_dev_addr(bond->dev, slave_dev);
		if (res)
			goto err_undo_flags;
	}

	new_slave = bond_alloc_slave(bond);
	if (!new_slave) {
		res = -ENOMEM;
		goto err_undo_flags;
	}

	new_slave->bond = bond;
	new_slave->dev = slave_dev;
	/* Set the new_slave's queue_id to be zero.  Queue ID mapping
	 * is set via sysfs or module option if desired.
	 */
	new_slave->queue_id = 0;

	/* Save slave's original mtu and then set it to match the bond */
	new_slave->original_mtu = slave_dev->mtu;
	res = dev_set_mtu(slave_dev, bond->dev->mtu);
	if (res) {
		netdev_dbg(bond_dev, "Error %d calling dev_set_mtu\n", res);
		goto err_free;
	}

	/* Save slave's original ("permanent") mac address for modes
	 * that need it, and for restoring it upon release, and then
	 * set it to the master's address
	 */
	bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr,
			  slave_dev->addr_len);

	if (!bond->params.fail_over_mac ||
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		/* Set slave to master's mac address.  The application already
		 * set the master's mac address to that of the first slave
		 */
		memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
		ss.ss_family = slave_dev->type;
		res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
					  extack);
		if (res) {
			netdev_dbg(bond_dev, "Error %d calling set_mac_address\n", res);
			goto err_restore_mtu;
		}
	}

	/* set slave flag before open to prevent IPv6 addrconf */
	slave_dev->flags |= IFF_SLAVE;

	/* open the slave since the application closed it */
	res = dev_open(slave_dev, extack);
	if (res) {
		netdev_dbg(bond_dev, "Opening slave %s failed\n", slave_dev->name);
		goto err_restore_mac;
	}

	slave_dev->priv_flags |= IFF_BONDING;
	/* initialize slave stats */
	dev_get_stats(new_slave->dev, &new_slave->slave_stats);

	if (bond_is_lb(bond)) {
		/* bond_alb_init_slave() must be called before all other stages since
		 * it might fail and we do not want to have to undo everything
		 */
		res = bond_alb_init_slave(bond, new_slave);
		if (res)
			goto err_close;
	}

	res = vlan_vids_add_by_dev(slave_dev, bond_dev);
	if (res) {
		netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
			   slave_dev->name);
		goto err_close;
	}

	prev_slave = bond_last_slave(bond);

	new_slave->delay = 0;
	new_slave->link_failure_count = 0;

	if (bond_update_speed_duplex(new_slave) &&
	    bond_needs_speed_duplex(bond))
		new_slave->link = BOND_LINK_DOWN;

	new_slave->last_rx = jiffies -
		(msecs_to_jiffies(bond->params.arp_interval) + 1);
	for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
		new_slave->target_last_arp_rx[i] = new_slave->last_rx;
1595

L
Linus Torvalds 已提交
1596 1597 1598 1599
	if (bond->params.miimon && !bond->params.use_carrier) {
		link_reporting = bond_check_dev_link(bond, slave_dev, 1);

		if ((link_reporting == -1) && !bond->params.arp_interval) {
1600
			/* miimon is set but a bonded network driver
L
Linus Torvalds 已提交
1601 1602 1603 1604 1605 1606 1607
			 * does not support ETHTOOL/MII and
			 * arp_interval is not set.  Note: if
			 * use_carrier is enabled, we will never go
			 * here (because netif_carrier is always
			 * supported); thus, we don't need to change
			 * the messages for netif_carrier.
			 */
1608 1609
			netdev_warn(bond_dev, "MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n",
				    slave_dev->name);
L
Linus Torvalds 已提交
1610 1611
		} else if (link_reporting == -1) {
			/* unable to get link status using mii/ethtool */
			netdev_warn(bond_dev, "can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n",
				    slave_dev->name);
		}
	}

	/* check for initial state */
	new_slave->link = BOND_LINK_NOCHANGE;
	if (bond->params.miimon) {
		if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
			if (bond->params.updelay) {
				bond_set_slave_link_state(new_slave,
							  BOND_LINK_BACK,
							  BOND_SLAVE_NOTIFY_NOW);
				new_slave->delay = bond->params.updelay;
			} else {
				bond_set_slave_link_state(new_slave,
							  BOND_LINK_UP,
							  BOND_SLAVE_NOTIFY_NOW);
			}
		} else {
			bond_set_slave_link_state(new_slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_NOW);
		}
	} else if (bond->params.arp_interval) {
		bond_set_slave_link_state(new_slave,
					  (netif_carrier_ok(slave_dev) ?
					  BOND_LINK_UP : BOND_LINK_DOWN),
					  BOND_SLAVE_NOTIFY_NOW);
	} else {
		bond_set_slave_link_state(new_slave, BOND_LINK_UP,
					  BOND_SLAVE_NOTIFY_NOW);
	}

	if (new_slave->link != BOND_LINK_DOWN)
		new_slave->last_link_up = jiffies;
	netdev_dbg(bond_dev, "Initial state of slave_dev is BOND_LINK_%s\n",
		   new_slave->link == BOND_LINK_DOWN ? "DOWN" :
		   (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));

	if (bond_uses_primary(bond) && bond->params.primary[0]) {
		/* if there is a primary slave, remember it */
		if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
			rcu_assign_pointer(bond->primary_slave, new_slave);
			bond->force_primary = true;
		}
	}

	switch (BOND_MODE(bond)) {
	case BOND_MODE_ACTIVEBACKUP:
		bond_set_slave_inactive_flags(new_slave,
					      BOND_SLAVE_NOTIFY_NOW);
		break;
	case BOND_MODE_8023AD:
		/* in 802.3ad mode, the internal mechanism
		 * will activate the slaves in the selected
		 * aggregator
		 */
		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
		/* if this is the first slave */
		if (!prev_slave) {
			SLAVE_AD_INFO(new_slave)->id = 1;
			/* Initialize AD with the number of times that the AD timer is called in 1 second
			 * can be called only after the mac address of the bond is set
			 */
			bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
		} else {
			SLAVE_AD_INFO(new_slave)->id =
				SLAVE_AD_INFO(prev_slave)->id + 1;
		}

		bond_3ad_bind_slave(new_slave);
		break;
	case BOND_MODE_TLB:
	case BOND_MODE_ALB:
		bond_set_active_slave(new_slave);
		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
		break;
	default:
		netdev_dbg(bond_dev, "This slave is always active in trunk mode\n");

		/* always active in trunk mode */
		bond_set_active_slave(new_slave);

		/* In trunking mode there is little meaning to curr_active_slave
		 * anyway (it holds no special properties of the bond device),
		 * so we can change it without calling change_active_interface()
		 */
		if (!rcu_access_pointer(bond->curr_active_slave) &&
		    new_slave->link == BOND_LINK_UP)
			rcu_assign_pointer(bond->curr_active_slave, new_slave);

		break;
	} /* switch(bond_mode) */

#ifdef CONFIG_NET_POLL_CONTROLLER
	if (bond->dev->npinfo) {
		if (slave_enable_netpoll(new_slave)) {
			netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
			res = -EBUSY;
			goto err_detach;
		}
	}
#endif

	if (!(bond_dev->features & NETIF_F_LRO))
		dev_disable_lro(slave_dev);

	res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
					 new_slave);
	if (res) {
		netdev_dbg(bond_dev, "Error %d calling netdev_rx_handler_register\n", res);
		goto err_detach;
	}

	res = bond_master_upper_dev_link(bond, new_slave, extack);
	if (res) {
		netdev_dbg(bond_dev, "Error %d calling bond_master_upper_dev_link\n", res);
		goto err_unregister;
	}

	res = bond_sysfs_slave_add(new_slave);
	if (res) {
		netdev_dbg(bond_dev, "Error %d calling bond_sysfs_slave_add\n", res);
		goto err_upper_unlink;
	}

	bond->nest_level = dev_get_nest_level(bond_dev) + 1;

	/* If the mode uses primary, then the following is handled by
	 * bond_change_active_slave().
	 */
	if (!bond_uses_primary(bond)) {
		/* set promiscuity level to new slave */
		if (bond_dev->flags & IFF_PROMISC) {
			res = dev_set_promiscuity(slave_dev, 1);
			if (res)
				goto err_sysfs_del;
		}

		/* set allmulti level to new slave */
		if (bond_dev->flags & IFF_ALLMULTI) {
			res = dev_set_allmulti(slave_dev, 1);
			if (res) {
				if (bond_dev->flags & IFF_PROMISC)
					dev_set_promiscuity(slave_dev, -1);
				goto err_sysfs_del;
			}
		}

		netif_addr_lock_bh(bond_dev);
		dev_mc_sync_multiple(slave_dev, bond_dev);
		dev_uc_sync_multiple(slave_dev, bond_dev);
		netif_addr_unlock_bh(bond_dev);

		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			/* add lacpdu mc addr to mc list */
			u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;

			dev_mc_add(slave_dev, lacpdu_multicast);
		}
	}

	bond->slave_cnt++;
	bond_compute_features(bond);
	bond_set_carrier(bond);

	if (bond_uses_primary(bond)) {
		block_netpoll_tx();
		bond_select_active_slave(bond);
		unblock_netpoll_tx();
	}

	if (bond_mode_can_use_xmit_hash(bond))
		bond_update_slave_arr(bond, NULL);

	netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
		    slave_dev->name,
		    bond_is_active_slave(new_slave) ? "an active" : "a backup",
		    new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");

	/* enslave is successful */
	bond_queue_slave_event(new_slave);
	return 0;

/* Undo stages on error */
err_sysfs_del:
	bond_sysfs_slave_del(new_slave);

err_upper_unlink:
	bond_upper_dev_unlink(bond, new_slave);

err_unregister:
	netdev_rx_handler_unregister(slave_dev);

err_detach:
	vlan_vids_del_by_dev(slave_dev, bond_dev);
	if (rcu_access_pointer(bond->primary_slave) == new_slave)
		RCU_INIT_POINTER(bond->primary_slave, NULL);
	if (rcu_access_pointer(bond->curr_active_slave) == new_slave) {
		block_netpoll_tx();
		bond_change_active_slave(bond, NULL);
		bond_select_active_slave(bond);
		unblock_netpoll_tx();
	}
	/* either primary_slave or curr_active_slave might've changed */
	synchronize_rcu();
	slave_disable_netpoll(new_slave);

err_close:
	slave_dev->priv_flags &= ~IFF_BONDING;
	dev_close(slave_dev);

err_restore_mac:
	slave_dev->flags &= ~IFF_SLAVE;
	if (!bond->params.fail_over_mac ||
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		/* XXX TODO - fom follow mode needs to change master's
		 * MAC if this slave's MAC is in use by the bond, or at
		 * least print a warning.
		 */
		bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
				  new_slave->dev->addr_len);
		ss.ss_family = slave_dev->type;
		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
	}

err_restore_mtu:
	dev_set_mtu(slave_dev, new_slave->original_mtu);

err_free:
	bond_free_slave(new_slave);

err_undo_flags:
	/* Enslave of first slave has failed and we need to fix master's mac */
	if (!bond_has_slaves(bond)) {
		if (ether_addr_equal_64bits(bond_dev->dev_addr,
					    slave_dev->dev_addr))
			eth_hw_addr_random(bond_dev);
		if (bond_dev->type != ARPHRD_ETHER) {
			dev_close(bond_dev);
			ether_setup(bond_dev);
			bond_dev->flags |= IFF_MASTER;
			bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		}
	}

	return res;
}
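
/* Added note: the error labels in bond_enslave() above undo the setup steps
 * in reverse order of how they were performed (sysfs entry, upper-dev link,
 * rx_handler, vlan ids/netpoll, device open, MAC, MTU, slave allocation),
 * falling through from the earliest applicable label to the end.
 */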

1862
/* Try to release the slave device <slave> from the bond device <master>
L
Linus Torvalds 已提交
1863
 * It is legal to access curr_active_slave without a lock because all the function
1864
 * is RTNL-locked. If "all" is true it means that the function is being called
1865
 * while destroying a bond interface and all slaves are being released.
L
Linus Torvalds 已提交
1866 1867 1868 1869 1870 1871 1872
 *
 * The rules for slave state should be:
 *   for Active/Backup:
 *     Active stays on all backups go down
 *   for Bonded connections:
 *     The first up interface should be left on and all others downed.
 */
static int __bond_release_one(struct net_device *bond_dev,
			      struct net_device *slave_dev,
			      bool all, bool unregister)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *oldcurrent;
	struct sockaddr_storage ss;
	int old_flags = bond_dev->flags;
	netdev_features_t old_features = bond_dev->features;

	/* slave is not a slave or master is not master of this slave */
	if (!(slave_dev->flags & IFF_SLAVE) ||
	    !netdev_has_upper_dev(slave_dev, bond_dev)) {
		netdev_dbg(bond_dev, "cannot release %s\n",
			   slave_dev->name);
		return -EINVAL;
	}

	block_netpoll_tx();

	slave = bond_get_slave_by_dev(bond, slave_dev);
	if (!slave) {
		/* not a slave of this bond */
		netdev_info(bond_dev, "%s not enslaved\n",
			    slave_dev->name);
		unblock_netpoll_tx();
		return -EINVAL;
	}

	bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);

	bond_sysfs_slave_del(slave);

	/* recompute stats just before removing the slave */
	bond_get_stats(bond->dev, &bond->bond_stats);

	bond_upper_dev_unlink(bond, slave);
	/* unregister rx_handler early so bond_handle_frame wouldn't be called
	 * for this slave anymore.
	 */
	netdev_rx_handler_unregister(slave_dev);

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		bond_3ad_unbind_slave(slave);

	if (bond_mode_can_use_xmit_hash(bond))
		bond_update_slave_arr(bond, slave);

	netdev_info(bond_dev, "Releasing %s interface %s\n",
		    bond_is_active_slave(slave) ? "active" : "backup",
		    slave_dev->name);

	oldcurrent = rcu_access_pointer(bond->curr_active_slave);

	RCU_INIT_POINTER(bond->current_arp_slave, NULL);

	if (!all && (!bond->params.fail_over_mac ||
		     BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
		if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
		    bond_has_slaves(bond))
			netdev_warn(bond_dev, "the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
				    slave_dev->name, slave->perm_hwaddr,
				    bond_dev->name, slave_dev->name);
	}

	if (rtnl_dereference(bond->primary_slave) == slave)
		RCU_INIT_POINTER(bond->primary_slave, NULL);

	if (oldcurrent == slave)
		bond_change_active_slave(bond, NULL);

	if (bond_is_lb(bond)) {
		/* Must be called only after the slave has been
		 * detached from the list and the curr_active_slave
		 * has been cleared (if our_slave == old_current),
		 * but before a new active slave is selected.
		 */
		bond_alb_deinit_slave(bond, slave);
	}

	if (all) {
		RCU_INIT_POINTER(bond->curr_active_slave, NULL);
	} else if (oldcurrent == slave) {
		/* Note that we hold RTNL over this sequence, so there
		 * is no concern that another slave add/remove event
		 * will interfere.
		 */
		bond_select_active_slave(bond);
	}

	if (!bond_has_slaves(bond)) {
		bond_set_carrier(bond);
		eth_hw_addr_random(bond_dev);
	}

	unblock_netpoll_tx();
	synchronize_rcu();
	bond->slave_cnt--;

	if (!bond_has_slaves(bond)) {
		call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
		call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
	}

	bond_compute_features(bond);
	if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
	    (old_features & NETIF_F_VLAN_CHALLENGED))
		netdev_info(bond_dev, "last VLAN challenged slave %s left bond %s - VLAN blocking is removed\n",
			    slave_dev->name, bond_dev->name);

	vlan_vids_del_by_dev(slave_dev, bond_dev);

	/* If the mode uses primary, then this case was handled above by
	 * bond_change_active_slave(..., NULL)
	 */
	if (!bond_uses_primary(bond)) {
		/* unset promiscuity level from slave
		 * NOTE: The NETDEV_CHANGEADDR call above may change the value
		 * of the IFF_PROMISC flag in the bond_dev, but we need the
		 * value of that flag before that change, as that was the value
		 * when this slave was attached, so we cache at the start of the
		 * function and use it here. Same goes for ALLMULTI below
		 */
		if (old_flags & IFF_PROMISC)
			dev_set_promiscuity(slave_dev, -1);

		/* unset allmulti level from slave */
		if (old_flags & IFF_ALLMULTI)
			dev_set_allmulti(slave_dev, -1);

		bond_hw_addr_flush(bond_dev, slave_dev);
	}

	slave_disable_netpoll(slave);

	/* close slave before restoring its mac address */
	dev_close(slave_dev);

	if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		/* restore original ("permanent") mac address */
		bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
				  slave->dev->addr_len);
		ss.ss_family = slave_dev->type;
		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
	}

	if (unregister)
		__dev_set_mtu(slave_dev, slave->original_mtu);
	else
		dev_set_mtu(slave_dev, slave->original_mtu);

	slave_dev->priv_flags &= ~IFF_BONDING;

	bond_free_slave(slave);

	return 0;
}

/* A wrapper used because of ndo_del_link */
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
{
	return __bond_release_one(bond_dev, slave_dev, false, false);
}

/* First release a slave and then destroy the bond if no more slaves are left.
 * Must be under rtnl_lock when this function is called.
 */
static int  bond_release_and_destroy(struct net_device *bond_dev,
				     struct net_device *slave_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	int ret;

	ret = __bond_release_one(bond_dev, slave_dev, false, true);
	if (ret == 0 && !bond_has_slaves(bond)) {
		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
		netdev_info(bond_dev, "Destroying bond %s\n",
			    bond_dev->name);
		bond_remove_proc_entry(bond);
		unregister_netdevice(bond_dev);
	}
	return ret;
}

static void bond_info_query(struct net_device *bond_dev, struct ifbond *info)
{
	struct bonding *bond = netdev_priv(bond_dev);
	bond_fill_ifbond(bond, info);
}

static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	int i = 0, res = -ENODEV;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter) {
		if (i++ == (int)info->slave_id) {
			res = 0;
			bond_fill_ifslave(slave, info);
			break;
		}
	}

	return res;
}

/*-------------------------------- Monitoring -------------------------------*/

/* called with rcu_read_lock() */
static int bond_miimon_inspect(struct bonding *bond)
{
	int link_state, commit = 0;
	struct list_head *iter;
	struct slave *slave;
	bool ignore_updelay;

	ignore_updelay = !rcu_dereference(bond->curr_active_slave);

	bond_for_each_slave_rcu(bond, slave, iter) {
		slave->new_link = BOND_LINK_NOCHANGE;
		slave->link_new_state = slave->link;

		link_state = bond_check_dev_link(bond, slave->dev, 0);

		switch (slave->link) {
		case BOND_LINK_UP:
			if (link_state)
				continue;

			bond_propose_link_state(slave, BOND_LINK_FAIL);
			commit++;
			slave->delay = bond->params.downdelay;
			if (slave->delay) {
				netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
					    (BOND_MODE(bond) ==
					     BOND_MODE_ACTIVEBACKUP) ?
					     (bond_is_active_slave(slave) ?
					      "active " : "backup ") : "",
					    slave->dev->name,
					    bond->params.downdelay * bond->params.miimon);
			}
			/*FALLTHRU*/
		case BOND_LINK_FAIL:
			if (link_state) {
				/* recovered before downdelay expired */
				bond_propose_link_state(slave, BOND_LINK_UP);
				slave->last_link_up = jiffies;
				netdev_info(bond->dev, "link status up again after %d ms for interface %s\n",
					    (bond->params.downdelay - slave->delay) *
					    bond->params.miimon,
					    slave->dev->name);
				commit++;
				continue;
			}

			if (slave->delay <= 0) {
				slave->new_link = BOND_LINK_DOWN;
				commit++;
				continue;
			}

			slave->delay--;
			break;

		case BOND_LINK_DOWN:
			if (!link_state)
				continue;

			bond_propose_link_state(slave, BOND_LINK_BACK);
			commit++;
			slave->delay = bond->params.updelay;

			if (slave->delay) {
				netdev_info(bond->dev, "link status up for interface %s, enabling it in %d ms\n",
					    slave->dev->name,
					    ignore_updelay ? 0 :
					    bond->params.updelay *
					    bond->params.miimon);
			}
			/*FALLTHRU*/
		case BOND_LINK_BACK:
			if (!link_state) {
				bond_propose_link_state(slave, BOND_LINK_DOWN);
				netdev_info(bond->dev, "link status down again after %d ms for interface %s\n",
					    (bond->params.updelay - slave->delay) *
					    bond->params.miimon,
					    slave->dev->name);
				commit++;
				continue;
			}

			if (ignore_updelay)
				slave->delay = 0;

			if (slave->delay <= 0) {
				slave->new_link = BOND_LINK_UP;
				commit++;
				ignore_updelay = false;
				continue;
			}

			slave->delay--;
			break;
		}
	}

	return commit;
}

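/* Added note: bond_miimon_inspect() above runs under RCU and only *proposes*
 * link state changes; slave->delay counts whole miimon intervals.  With
 * illustrative numbers miimon=100 and downdelay=300, params.downdelay is 3
 * ticks, so a slave that loses carrier moves UP -> FAIL and is proposed DOWN
 * only on the third following inspection pass (~300 ms later), unless the
 * link recovers first.  The proposals are applied later, under RTNL, by
 * bond_miimon_commit(), which uses bond_miimon_link_change() below to notify
 * the mode-specific code.
 */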
static void bond_miimon_link_change(struct bonding *bond,
				    struct slave *slave,
				    char link)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_8023AD:
		bond_3ad_handle_link_change(slave, link);
		break;
	case BOND_MODE_TLB:
	case BOND_MODE_ALB:
		bond_alb_handle_link_change(bond, slave, link);
		break;
	case BOND_MODE_XOR:
		bond_update_slave_arr(bond, NULL);
		break;
	}
}

static void bond_miimon_commit(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave, *primary;

	bond_for_each_slave(bond, slave, iter) {
		switch (slave->new_link) {
		case BOND_LINK_NOCHANGE:
			continue;

		case BOND_LINK_UP:
			if (bond_update_speed_duplex(slave) &&
			    bond_needs_speed_duplex(bond)) {
				slave->link = BOND_LINK_DOWN;
				if (net_ratelimit())
					netdev_warn(bond->dev,
						    "failed to get link speed/duplex for %s\n",
						    slave->dev->name);
				continue;
			}
			bond_set_slave_link_state(slave, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);
			slave->last_link_up = jiffies;

			primary = rtnl_dereference(bond->primary_slave);
			if (BOND_MODE(bond) == BOND_MODE_8023AD) {
				/* prevent it from being the active one */
				bond_set_backup_slave(slave);
			} else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
				/* make it immediately active */
				bond_set_active_slave(slave);
			} else if (slave != primary) {
				/* prevent it from being the active one */
				bond_set_backup_slave(slave);
			}

			netdev_info(bond->dev, "link status definitely up for interface %s, %u Mbps %s duplex\n",
				    slave->dev->name,
				    slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
				    slave->duplex ? "full" : "half");

			bond_miimon_link_change(bond, slave, BOND_LINK_UP);

			if (!bond->curr_active_slave || slave == primary)
				goto do_failover;

			continue;

		case BOND_LINK_DOWN:
			if (slave->link_failure_count < UINT_MAX)
				slave->link_failure_count++;

			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_NOW);

			if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
			    BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_set_slave_inactive_flags(slave,
							      BOND_SLAVE_NOTIFY_NOW);

			netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n",
				    slave->dev->name);

			bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);

			if (slave == rcu_access_pointer(bond->curr_active_slave))
				goto do_failover;

			continue;

		default:
			netdev_err(bond->dev, "invalid new link %d on slave %s\n",
				   slave->new_link, slave->dev->name);
			slave->new_link = BOND_LINK_NOCHANGE;

			continue;
		}

do_failover:
		block_netpoll_tx();
		bond_select_active_slave(bond);
		unblock_netpoll_tx();
	}

	bond_set_carrier(bond);
}

/* bond_mii_monitor
 *
 * Really a wrapper that splits the mii monitor into two phases: an
 * inspection, then (if inspection indicates something needs to be done)
 * an acquisition of appropriate locks followed by a commit phase to
 * implement whatever link state changes are indicated.
 */
static void bond_mii_monitor(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mii_work.work);
	bool should_notify_peers = false;
	unsigned long delay;
	struct slave *slave;
	struct list_head *iter;

	delay = msecs_to_jiffies(bond->params.miimon);

	if (!bond_has_slaves(bond))
		goto re_arm;

	rcu_read_lock();

	should_notify_peers = bond_should_notify_peers(bond);

	if (bond_miimon_inspect(bond)) {
		rcu_read_unlock();

		/* Race avoidance with bond_close cancel of workqueue */
		if (!rtnl_trylock()) {
			delay = 1;
			should_notify_peers = false;
			goto re_arm;
		}

		bond_for_each_slave(bond, slave, iter) {
			bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
		}
		bond_miimon_commit(bond);

		rtnl_unlock();	/* might sleep, hold no other locks */
	} else
		rcu_read_unlock();

re_arm:
	if (bond->params.miimon)
		queue_delayed_work(bond->wq, &bond->mii_work, delay);

	if (should_notify_peers) {
		if (!rtnl_trylock())
			return;
		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
		rtnl_unlock();
	}
}

static int bond_upper_dev_walk(struct net_device *upper, void *data)
{
	__be32 ip = *((__be32 *)data);

	return ip == bond_confirm_addr(upper, 0, ip);
}

static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
{
	bool ret = false;

	if (ip == bond_confirm_addr(bond->dev, 0, ip))
		return true;

	rcu_read_lock();
	if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk, &ip))
		ret = true;
	rcu_read_unlock();

	return ret;
}

/* We go to the (large) trouble of VLAN tagging ARP frames because
 * switches in VLAN mode (especially if ports are configured as
 * "native" to a VLAN) might not pass non-tagged frames.
 */
static void bond_arp_send(struct net_device *slave_dev, int arp_op,
			  __be32 dest_ip, __be32 src_ip,
			  struct bond_vlan_tag *tags)
{
	struct sk_buff *skb;
	struct bond_vlan_tag *outer_tag = tags;

	netdev_dbg(slave_dev, "arp %d on slave %s: dst %pI4 src %pI4\n",
		   arp_op, slave_dev->name, &dest_ip, &src_ip);

	skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
			 NULL, slave_dev->dev_addr, NULL);

	if (!skb) {
		net_err_ratelimited("ARP packet allocation failed\n");
		return;
	}

	if (!tags || tags->vlan_proto == VLAN_N_VID)
		goto xmit;

	tags++;

	/* Go through all the tags backwards and add them to the packet */
	while (tags->vlan_proto != VLAN_N_VID) {
		if (!tags->vlan_id) {
			tags++;
			continue;
		}

		netdev_dbg(slave_dev, "inner tag: proto %X vid %X\n",
			   ntohs(outer_tag->vlan_proto), tags->vlan_id);
		skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
						tags->vlan_id);
		if (!skb) {
			net_err_ratelimited("failed to insert inner VLAN tag\n");
			return;
		}

		tags++;
	}
	/* Set the outer tag */
	if (outer_tag->vlan_id) {
		netdev_dbg(slave_dev, "outer tag: proto %X vid %X\n",
			   ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
		__vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
				       outer_tag->vlan_id);
	}

xmit:
	arp_xmit(skb);
}
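
/* Added note on bond_arp_send() above: entries after tags[0] are written into
 * the packet data itself, while tags[0] - the VLAN sitting directly on top of
 * the bond, if any - is handed to the driver via __vlan_hwaccel_put_tag() so
 * it becomes the outermost tag on the wire.
 */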

/* Validate the device path between the @start_dev and the @end_dev.
 * The path is valid if the @end_dev is reachable through device
 * stacking.
 * When the path is validated, collect any vlan information in the
 * path.
 */
struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
					      struct net_device *end_dev,
					      int level)
{
	struct bond_vlan_tag *tags;
	struct net_device *upper;
	struct list_head  *iter;

	if (start_dev == end_dev) {
		tags = kcalloc(level + 1, sizeof(*tags), GFP_ATOMIC);
		if (!tags)
			return ERR_PTR(-ENOMEM);
		tags[level].vlan_proto = VLAN_N_VID;
		return tags;
	}

	netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
		tags = bond_verify_device_path(upper, end_dev, level + 1);
		if (IS_ERR_OR_NULL(tags)) {
			if (IS_ERR(tags))
				return tags;
			continue;
		}
		if (is_vlan_dev(upper)) {
			tags[level].vlan_proto = vlan_dev_vlan_proto(upper);
			tags[level].vlan_id = vlan_dev_vlan_id(upper);
		}

		return tags;
	}

	return NULL;
}

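/* Added note: bond_arp_send_all() below resolves each arp_ip_target to an
 * egress device and collects the VLAN stack with bond_verify_device_path().
 * Illustrative example (hypothetical devices): with bond0 carrying a VLAN
 * device bond0.10 (802.1Q, VID 10) and the target routed out of bond0.10,
 * bond_verify_device_path(bond0, bond0.10, 0) returns a two-entry array:
 * tags[0] = {ETH_P_8021Q, 10} and tags[1] as the terminator
 * (vlan_proto == VLAN_N_VID); bond_arp_send() then emits the ARP with VID 10
 * as the outer tag.
 */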
static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
{
	struct rtable *rt;
	struct bond_vlan_tag *tags;
	__be32 *targets = bond->params.arp_targets, addr;
	int i;

	for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
		netdev_dbg(bond->dev, "basa: target %pI4\n", &targets[i]);
		tags = NULL;

		/* Find out through which dev should the packet go */
		rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
				     RTO_ONLINK, 0);
		if (IS_ERR(rt)) {
			/* there's no route to target - try to send arp
			 * probe to generate any traffic (arp_validate=0)
			 */
			if (bond->params.arp_validate)
				net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
						     bond->dev->name,
						     &targets[i]);
			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
				      0, tags);
			continue;
		}

		/* bond device itself */
		if (rt->dst.dev == bond->dev)
			goto found;

		rcu_read_lock();
		tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0);
		rcu_read_unlock();

		if (!IS_ERR_OR_NULL(tags))
			goto found;

		/* Not our device - skip */
		netdev_dbg(bond->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
			   &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");

		ip_rt_put(rt);
		continue;

found:
		addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
		ip_rt_put(rt);
		bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
			      addr, tags);
		kfree(tags);
	}
}

static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
{
	int i;

	if (!sip || !bond_has_this_ip(bond, tip)) {
		netdev_dbg(bond->dev, "bva: sip %pI4 tip %pI4 not found\n",
			   &sip, &tip);
		return;
	}

	i = bond_get_targets_ip(bond->params.arp_targets, sip);
	if (i == -1) {
		netdev_dbg(bond->dev, "bva: sip %pI4 not found in targets\n",
			   &sip);
		return;
	}
	slave->last_rx = jiffies;
	slave->target_last_arp_rx[i] = jiffies;
}

int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
		 struct slave *slave)
{
	struct arphdr *arp = (struct arphdr *)skb->data;
	struct slave *curr_active_slave, *curr_arp_slave;
	unsigned char *arp_ptr;
	__be32 sip, tip;
	int is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
	unsigned int alen;

	if (!slave_do_arp_validate(bond, slave)) {
		if ((slave_do_arp_validate_only(bond) && is_arp) ||
		    !slave_do_arp_validate_only(bond))
			slave->last_rx = jiffies;
		return RX_HANDLER_ANOTHER;
	} else if (!is_arp) {
		return RX_HANDLER_ANOTHER;
	}

	alen = arp_hdr_len(bond->dev);

	netdev_dbg(bond->dev, "bond_arp_rcv: skb->dev %s\n",
		   skb->dev->name);

	if (alen > skb_headlen(skb)) {
		arp = kmalloc(alen, GFP_ATOMIC);
		if (!arp)
			goto out_unlock;
		if (skb_copy_bits(skb, 0, arp, alen) < 0)
			goto out_unlock;
	}

	if (arp->ar_hln != bond->dev->addr_len ||
	    skb->pkt_type == PACKET_OTHERHOST ||
	    skb->pkt_type == PACKET_LOOPBACK ||
	    arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_pln != 4)
		goto out_unlock;

	arp_ptr = (unsigned char *)(arp + 1);
	arp_ptr += bond->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4 + bond->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	netdev_dbg(bond->dev, "bond_arp_rcv: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
		   slave->dev->name, bond_slave_state(slave),
		   bond->params.arp_validate, slave_do_arp_validate(bond, slave),
		   &sip, &tip);

	curr_active_slave = rcu_dereference(bond->curr_active_slave);
	curr_arp_slave = rcu_dereference(bond->current_arp_slave);

	/* We 'trust' the received ARP enough to validate it if:
	 *
	 * (a) the slave receiving the ARP is active (which includes the
	 * current ARP slave, if any), or
	 *
	 * (b) the receiving slave isn't active, but there is a currently
	 * active slave and it received valid arp reply(s) after it became
	 * the currently active slave, or
	 *
	 * (c) there is an ARP slave that sent an ARP during the prior ARP
	 * interval, and we receive an ARP reply on any slave.  We accept
	 * these because switch FDB update delays may deliver the ARP
	 * reply to a slave other than the sender of the ARP request.
	 *
	 * Note: for (b), backup slaves are receiving the broadcast ARP
	 * request, not a reply.  This request passes from the sending
	 * slave through the L2 switch(es) to the receiving slave.  Since
	 * this is checking the request, sip/tip are swapped for
	 * validation.
	 *
	 * This is done to avoid endless looping when we can't reach the
	 * arp_ip_target and fool ourselves with our own arp requests.
	 */
	if (bond_is_active_slave(slave))
		bond_validate_arp(bond, slave, sip, tip);
	else if (curr_active_slave &&
		 time_after(slave_last_rx(bond, curr_active_slave),
			    curr_active_slave->last_link_up))
		bond_validate_arp(bond, slave, tip, sip);
	else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
		 bond_time_in_interval(bond,
				       dev_trans_start(curr_arp_slave->dev), 1))
		bond_validate_arp(bond, slave, sip, tip);

out_unlock:
	if (arp != (struct arphdr *)skb->data)
		kfree(arp);
	return RX_HANDLER_ANOTHER;
}

/* function to verify if we're in the arp_interval timeslice, returns true if
 * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
 * arp_interval/2) . the arp_interval/2 is needed for really fast networks.
 */
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
				  int mod)
{
	int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);

	return time_in_range(jiffies,
			     last_act - delta_in_ticks,
			     last_act + mod * delta_in_ticks + delta_in_ticks/2);
}
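
/* Added worked example (illustrative): with arp_interval=1000 ms,
 * delta_in_ticks is one second's worth of jiffies, so
 * bond_time_in_interval(bond, last_act, 2) is true while jiffies lies in
 * [last_act - 1s, last_act + 2.5s]; the extra arp_interval/2 is the slack the
 * comment above mentions for really fast networks.
 */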

/* This function is called regularly to monitor each slave's link
 * ensuring that traffic is being sent and received when arp monitoring
 * is used in load-balancing mode. if the adapter has been dormant, then an
 * arp is transmitted to generate traffic. see activebackup_arp_monitor for
 * arp monitoring in active backup mode.
 */
static void bond_loadbalance_arp_mon(struct bonding *bond)
{
	struct slave *slave, *oldcurrent;
	struct list_head *iter;
	int do_failover = 0, slave_state_changed = 0;

	if (!bond_has_slaves(bond))
		goto re_arm;

	rcu_read_lock();

	oldcurrent = rcu_dereference(bond->curr_active_slave);
	/* see if any of the previous devices are up now (i.e. they have
	 * xmt and rcv traffic). the curr_active_slave does not come into
	 * the picture unless it is null. also, slave->last_link_up is not
	 * needed here because we send an arp on each slave and give a slave
	 * as long as it needs to get the tx/rx within the delta.
	 * TODO: what about up/down delay in arp mode? it wasn't here before
	 *       so it can wait
	 */
	bond_for_each_slave_rcu(bond, slave, iter) {
		unsigned long trans_start = dev_trans_start(slave->dev);

		slave->new_link = BOND_LINK_NOCHANGE;

		if (slave->link != BOND_LINK_UP) {
			if (bond_time_in_interval(bond, trans_start, 1) &&
			    bond_time_in_interval(bond, slave->last_rx, 1)) {

				slave->new_link = BOND_LINK_UP;
				slave_state_changed = 1;

				/* primary_slave has no meaning in round-robin
				 * mode. the window of a slave being up and
				 * curr_active_slave being null after enslaving
				 * is closed.
				 */
				if (!oldcurrent) {
					netdev_info(bond->dev, "link status definitely up for interface %s\n",
						    slave->dev->name);
					do_failover = 1;
				} else {
					netdev_info(bond->dev, "interface %s is now up\n",
						    slave->dev->name);
				}
			}
		} else {
			/* slave->link == BOND_LINK_UP */

			/* not all switches will respond to an arp request
			 * when the source ip is 0, so don't take the link down
			 * if we don't know our ip yet
			 */
			if (!bond_time_in_interval(bond, trans_start, 2) ||
			    !bond_time_in_interval(bond, slave->last_rx, 2)) {

				slave->new_link = BOND_LINK_DOWN;
				slave_state_changed = 1;

				if (slave->link_failure_count < UINT_MAX)
					slave->link_failure_count++;

				netdev_info(bond->dev, "interface %s is now down\n",
					    slave->dev->name);

				if (slave == oldcurrent)
					do_failover = 1;
			}
		}

		/* note: if switch is in round-robin mode, all links
		 * must tx arp to ensure all links rx an arp - otherwise
		 * links may oscillate or not come up at all; if switch is
		 * in something like xor mode, there is nothing we can
		 * do - all replies will be rx'ed on same link causing slaves
		 * to be unstable during low/no traffic periods
		 */
		if (bond_slave_is_up(slave))
			bond_arp_send_all(bond, slave);
	}

	rcu_read_unlock();

	if (do_failover || slave_state_changed) {
		if (!rtnl_trylock())
			goto re_arm;

		bond_for_each_slave(bond, slave, iter) {
			if (slave->new_link != BOND_LINK_NOCHANGE)
				slave->link = slave->new_link;
		}

		if (slave_state_changed) {
			bond_slave_state_change(bond);
			if (BOND_MODE(bond) == BOND_MODE_XOR)
				bond_update_slave_arr(bond, NULL);
		}
		if (do_failover) {
			block_netpoll_tx();
			bond_select_active_slave(bond);
			unblock_netpoll_tx();
		}
		rtnl_unlock();
	}

re_arm:
	if (bond->params.arp_interval)
		queue_delayed_work(bond->wq, &bond->arp_work,
				   msecs_to_jiffies(bond->params.arp_interval));
}

/* Called to inspect slaves for active-backup mode ARP monitor link state
 * changes.  Sets new_link in slaves to specify what action should take
 * place for the slave.  Returns 0 if no changes are found, >0 if changes
 * to link states must be committed.
 *
 * Called with rcu_read_lock held.
 */
static int bond_ab_arp_inspect(struct bonding *bond)
{
	unsigned long trans_start, last_rx;
	struct list_head *iter;
	struct slave *slave;
	int commit = 0;

	bond_for_each_slave_rcu(bond, slave, iter) {
		slave->new_link = BOND_LINK_NOCHANGE;
		last_rx = slave_last_rx(bond, slave);

		if (slave->link != BOND_LINK_UP) {
			if (bond_time_in_interval(bond, last_rx, 1)) {
				slave->new_link = BOND_LINK_UP;
				commit++;
			}
			continue;
		}

		/* Give slaves 2*delta after being enslaved or made
		 * active.  This avoids bouncing, as the last receive
		 * times need a full ARP monitor cycle to be updated.
		 */
		if (bond_time_in_interval(bond, slave->last_link_up, 2))
			continue;

		/* Backup slave is down if:
		 * - No current_arp_slave AND
		 * - more than 3*delta since last receive AND
		 * - the bond has an IP address
		 *
		 * Note: a non-null current_arp_slave indicates
		 * the curr_active_slave went down and we are
		 * searching for a new one; under this condition
		 * we only take the curr_active_slave down - this
		 * gives each slave a chance to tx/rx traffic
		 * before being taken out
		 */
		if (!bond_is_active_slave(slave) &&
		    !rcu_access_pointer(bond->current_arp_slave) &&
		    !bond_time_in_interval(bond, last_rx, 3)) {
			slave->new_link = BOND_LINK_DOWN;
			commit++;
		}

		/* Active slave is down if:
		 * - more than 2*delta since transmitting OR
		 * - (more than 2*delta since receive AND
		 *    the bond has an IP address)
		 */
		trans_start = dev_trans_start(slave->dev);
		if (bond_is_active_slave(slave) &&
		    (!bond_time_in_interval(bond, trans_start, 2) ||
		     !bond_time_in_interval(bond, last_rx, 2))) {
			slave->new_link = BOND_LINK_DOWN;
			commit++;
		}
	}

	return commit;
}

/* Called to commit link state changes noted by inspection step of
 * active-backup mode ARP monitor.
 *
 * Called with RTNL held.
 */
static void bond_ab_arp_commit(struct bonding *bond)
{
	unsigned long trans_start;
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter) {
		switch (slave->new_link) {
		case BOND_LINK_NOCHANGE:
			continue;

		case BOND_LINK_UP:
			trans_start = dev_trans_start(slave->dev);
			if (rtnl_dereference(bond->curr_active_slave) != slave ||
			    (!rtnl_dereference(bond->curr_active_slave) &&
			     bond_time_in_interval(bond, trans_start, 1))) {
				struct slave *current_arp_slave;

				current_arp_slave = rtnl_dereference(bond->current_arp_slave);
				bond_set_slave_link_state(slave, BOND_LINK_UP,
							  BOND_SLAVE_NOTIFY_NOW);
				if (current_arp_slave) {
					bond_set_slave_inactive_flags(
						current_arp_slave,
						BOND_SLAVE_NOTIFY_NOW);
					RCU_INIT_POINTER(bond->current_arp_slave, NULL);
				}

				netdev_info(bond->dev, "link status definitely up for interface %s\n",
					    slave->dev->name);

				if (!rtnl_dereference(bond->curr_active_slave) ||
				    slave == rtnl_dereference(bond->primary_slave))
					goto do_failover;

			}

			continue;

		case BOND_LINK_DOWN:
			if (slave->link_failure_count < UINT_MAX)
				slave->link_failure_count++;

			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_NOW);
			bond_set_slave_inactive_flags(slave,
						      BOND_SLAVE_NOTIFY_NOW);

			netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n",
				    slave->dev->name);

			if (slave == rtnl_dereference(bond->curr_active_slave)) {
				RCU_INIT_POINTER(bond->current_arp_slave, NULL);
				goto do_failover;
			}

			continue;

		default:
			netdev_err(bond->dev, "impossible: new_link %d on slave %s\n",
				   slave->new_link, slave->dev->name);
			continue;
		}

do_failover:
		block_netpoll_tx();
		bond_select_active_slave(bond);
		unblock_netpoll_tx();
	}

	bond_set_carrier(bond);
}

/* Send ARP probes for active-backup mode ARP monitor.
 *
 * Called with rcu_read_lock held.
 */
static bool bond_ab_arp_probe(struct bonding *bond)
{
	struct slave *slave, *before = NULL, *new_slave = NULL,
		     *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
		     *curr_active_slave = rcu_dereference(bond->curr_active_slave);
	struct list_head *iter;
	bool found = false;
	bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;

	if (curr_arp_slave && curr_active_slave)
		netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n",
			    curr_arp_slave->dev->name,
			    curr_active_slave->dev->name);

	if (curr_active_slave) {
		bond_arp_send_all(bond, curr_active_slave);
		return should_notify_rtnl;
	}

	/* if we don't have a curr_active_slave, search for the next available
	 * backup slave from the current_arp_slave and make it the candidate
	 * for becoming the curr_active_slave
	 */

	if (!curr_arp_slave) {
		curr_arp_slave = bond_first_slave_rcu(bond);
		if (!curr_arp_slave)
			return should_notify_rtnl;
	}

	bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);

	bond_for_each_slave_rcu(bond, slave, iter) {
		if (!found && !before && bond_slave_is_up(slave))
			before = slave;

		if (found && !new_slave && bond_slave_is_up(slave))
			new_slave = slave;
		/* if the link state is up at this point, we
		 * mark it down - this can happen if we have
		 * simultaneous link failures and
		 * reselect_active_interface doesn't make this
		 * one the current slave so it is still marked
		 * up when it is actually down
		 */
		if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_LATER);
			if (slave->link_failure_count < UINT_MAX)
				slave->link_failure_count++;

			bond_set_slave_inactive_flags(slave,
						      BOND_SLAVE_NOTIFY_LATER);

			netdev_info(bond->dev, "backup interface %s is now down\n",
				    slave->dev->name);
		}
		if (slave == curr_arp_slave)
			found = true;
	}

	if (!new_slave && before)
		new_slave = before;

	if (!new_slave)
		goto check_state;

	bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
				  BOND_SLAVE_NOTIFY_LATER);
	bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
	bond_arp_send_all(bond, new_slave);
	new_slave->last_link_up = jiffies;
	rcu_assign_pointer(bond->current_arp_slave, new_slave);

check_state:
	bond_for_each_slave_rcu(bond, slave, iter) {
		if (slave->should_notify || slave->should_notify_link) {
			should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
			break;
		}
	}
	return should_notify_rtnl;
}
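
/* Added note on bond_ab_arp_probe() above: when there is no active slave, the
 * loop walks the slave list once, remembering the first up slave seen before
 * the current ARP slave ("before") and picking the first up slave found after
 * it ("new_slave"); if nothing usable follows, it wraps back to "before".
 * The chosen slave is put into BOND_LINK_BACK state, probed immediately and
 * recorded as the new current_arp_slave.
 */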

static void bond_activebackup_arp_mon(struct bonding *bond)
{
	bool should_notify_peers = false;
	bool should_notify_rtnl = false;
	int delta_in_ticks;

	delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);

	if (!bond_has_slaves(bond))
		goto re_arm;

	rcu_read_lock();

	should_notify_peers = bond_should_notify_peers(bond);

	if (bond_ab_arp_inspect(bond)) {
		rcu_read_unlock();

		/* Race avoidance with bond_close flush of workqueue */
		if (!rtnl_trylock()) {
			delta_in_ticks = 1;
			should_notify_peers = false;
			goto re_arm;
		}

		bond_ab_arp_commit(bond);

		rtnl_unlock();
		rcu_read_lock();
	}

	should_notify_rtnl = bond_ab_arp_probe(bond);
	rcu_read_unlock();

re_arm:
	if (bond->params.arp_interval)
		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);

	if (should_notify_peers || should_notify_rtnl) {
		if (!rtnl_trylock())
			return;

		if (should_notify_peers)
			call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
						 bond->dev);
		if (should_notify_rtnl) {
			bond_slave_state_notify(bond);
			bond_slave_link_notify(bond);
		}

		rtnl_unlock();
	}
}

static void bond_arp_monitor(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    arp_work.work);

	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
		bond_activebackup_arp_mon(bond);
	else
		bond_loadbalance_arp_mon(bond);
}

/*-------------------------- netdev event handling --------------------------*/

/* Change device name */
static int bond_event_changename(struct bonding *bond)
{
	bond_remove_proc_entry(bond);
	bond_create_proc_entry(bond);

	bond_debug_reregister(bond);

	return NOTIFY_DONE;
}

static int bond_master_netdev_event(unsigned long event,
				    struct net_device *bond_dev)
{
	struct bonding *event_bond = netdev_priv(bond_dev);

	switch (event) {
	case NETDEV_CHANGENAME:
		return bond_event_changename(event_bond);
	case NETDEV_UNREGISTER:
		bond_remove_proc_entry(event_bond);
		break;
	case NETDEV_REGISTER:
		bond_create_proc_entry(event_bond);
		break;
	case NETDEV_NOTIFY_PEERS:
		if (event_bond->send_peer_notif)
			event_bond->send_peer_notif--;
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static int bond_slave_netdev_event(unsigned long event,
				   struct net_device *slave_dev)
{
	struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary;
	struct bonding *bond;
	struct net_device *bond_dev;

	/* A netdev event can be generated while enslaving a device
	 * before netdev_rx_handler_register is called in which case
	 * slave will be NULL
	 */
	if (!slave)
		return NOTIFY_DONE;
	bond_dev = slave->bond->dev;
	bond = slave->bond;
	primary = rtnl_dereference(bond->primary_slave);

	switch (event) {
	case NETDEV_UNREGISTER:
		if (bond_dev->type != ARPHRD_ETHER)
			bond_release_and_destroy(bond_dev, slave_dev);
		else
			__bond_release_one(bond_dev, slave_dev, false, true);
		break;
	case NETDEV_UP:
	case NETDEV_CHANGE:
		/* For 802.3ad mode only:
		 * Getting invalid Speed/Duplex values here will put slave
		 * in weird state. So mark it as link-fail for the time
		 * being and let link-monitoring (miimon) set it right when
		 * correct speeds/duplex are available.
		 */
		if (bond_update_speed_duplex(slave) &&
		    BOND_MODE(bond) == BOND_MODE_8023AD)
			slave->link = BOND_LINK_FAIL;

		if (BOND_MODE(bond) == BOND_MODE_8023AD)
			bond_3ad_adapter_speed_duplex_changed(slave);
		/* Fallthrough */
	case NETDEV_DOWN:
		/* Refresh slave-array if applicable!
		 * If the setup does not use miimon or arpmon (mode-specific!),
		 * then these events will not cause the slave-array to be
		 * refreshed. This will cause xmit to use a slave that is not
		 * usable. Avoid such situation by refreshing the array at these
		 * events. If these (miimon/arpmon) parameters are configured
		 * then array gets refreshed twice and that should be fine!
		 */
		if (bond_mode_can_use_xmit_hash(bond))
3151
			bond_update_slave_arr(bond, NULL);
L
Linus Torvalds 已提交
3152 3153
		break;
	case NETDEV_CHANGEMTU:
3154
		/* TODO: Should slaves be allowed to
L
Linus Torvalds 已提交
3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166
		 * independently alter their MTU?  For
		 * an active-backup bond, slaves need
		 * not be the same type of device, so
		 * MTUs may vary.  For other modes,
		 * slaves arguably should have the
		 * same MTUs. To do this, we'd need to
		 * take over the slave's change_mtu
		 * function for the duration of their
		 * servitude.
		 */
		break;
	case NETDEV_CHANGENAME:
		/* we don't care if we don't have primary set */
		if (!bond_uses_primary(bond) ||
		    !bond->params.primary[0])
			break;

		if (slave == primary) {
			/* slave's name changed - he's no longer primary */
			RCU_INIT_POINTER(bond->primary_slave, NULL);
		} else if (!strcmp(slave_dev->name, bond->params.primary)) {
			/* we have a new primary slave */
			rcu_assign_pointer(bond->primary_slave, slave);
		} else { /* we didn't change primary - exit */
			break;
		}

		netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n",
			    primary ? slave_dev->name : "none");

		block_netpoll_tx();
		bond_select_active_slave(bond);
		unblock_netpoll_tx();
		break;
	case NETDEV_FEAT_CHANGE:
		bond_compute_features(bond);
		break;
	case NETDEV_RESEND_IGMP:
		/* Propagate to master device */
		call_netdevice_notifiers(event, slave->bond->dev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

/* bond_netdev_event: handle netdev notifier chain events.
 *
 * This function receives events for the netdev chain.  The caller (an
 * ioctl handler calling blocking_notifier_call_chain) holds the necessary
 * locks for us to safely manipulate the slave devices (RTNL lock,
 * dev_probe_lock).
 */
static int bond_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	netdev_dbg(event_dev, "event: %lx\n", event);

	if (!(event_dev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	if (event_dev->flags & IFF_MASTER) {
		netdev_dbg(event_dev, "IFF_MASTER\n");
		return bond_master_netdev_event(event, event_dev);
	}

	if (event_dev->flags & IFF_SLAVE) {
		netdev_dbg(event_dev, "IFF_SLAVE\n");
		return bond_slave_netdev_event(event, event_dev);
	}

	return NOTIFY_DONE;
}

static struct notifier_block bond_netdev_notifier = {
	.notifier_call = bond_netdev_event,
};

/*---------------------------- Hashing Policies -----------------------------*/

/* L2 hash helper */
static inline u32 bond_eth_hash(struct sk_buff *skb)
{
	struct ethhdr *ep, hdr_tmp;

	ep = skb_header_pointer(skb, 0, sizeof(hdr_tmp), &hdr_tmp);
	if (ep)
		return ep->h_dest[5] ^ ep->h_source[5] ^ ep->h_proto;
	return 0;
}
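
/* Illustrative property of the L2 policy above: only the last octet of the
 * destination and source MAC addresses plus the EtherType feed the hash
 * (h_dest[5] ^ h_source[5] ^ h_proto), so all traffic between the same
 * pair of hosts, no matter how many TCP/UDP sessions they open, is pinned
 * to a single slave.
 */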

/* Extract the appropriate headers based on bond's xmit policy */
static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
			      struct flow_keys *fk)
{
	const struct ipv6hdr *iph6;
	const struct iphdr *iph;
	int noff, proto = -1;

	if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
		return skb_flow_dissect_flow_keys(skb, fk, 0);

	fk->ports.ports = 0;
	noff = skb_network_offset(skb);
	if (skb->protocol == htons(ETH_P_IP)) {
		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
			return false;
		iph = ip_hdr(skb);
		iph_to_flow_copy_v4addrs(fk, iph);
		noff += iph->ihl << 2;
		if (!ip_is_fragment(iph))
			proto = iph->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
			return false;
		iph6 = ipv6_hdr(skb);
		iph_to_flow_copy_v6addrs(fk, iph6);
		noff += sizeof(*iph6);
		proto = iph6->nexthdr;
	} else {
		return false;
	}
	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
		fk->ports.ports = skb_flow_get_ports(skb, noff, proto);

	return true;
}

/**
 * bond_xmit_hash - generate a hash value based on the xmit policy
 * @bond: bonding device
 * @skb: buffer to use for headers
 *
 * This function will extract the necessary headers from the skb buffer and use
 * them to generate a hash based on the xmit_policy set in the bonding device
3294
 */
3295
u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
3296
{
3297 3298
	struct flow_keys flow;
	u32 hash;
3299

E
Eric Dumazet 已提交
3300 3301 3302 3303
	if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 &&
	    skb->l4_hash)
		return skb->hash;

3304 3305
	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
	    !bond_flow_dissect(bond, skb, &flow))
3306
		return bond_eth_hash(skb);
3307

3308 3309 3310 3311
	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
	    bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
		hash = bond_eth_hash(skb);
	else
3312
		hash = (__force u32)flow.ports.ports;
3313 3314
	hash ^= (__force u32)flow_get_u32_dst(&flow) ^
		(__force u32)flow_get_u32_src(&flow);
3315 3316 3317
	hash ^= (hash >> 16);
	hash ^= (hash >> 8);

3318
	return hash >> 1;
3319 3320
}
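
/* Worked example (illustrative) for xmit_policy layer3+4 on a plain IPv4
 * TCP flow: the starting value is the 32-bit source/destination port pair
 * and the addresses are folded in afterwards:
 *
 *	hash  = ports;			(flow.ports.ports)
 *	hash ^= daddr ^ saddr;		(flow_get_u32_dst/src)
 *	hash ^= hash >> 16;
 *	hash ^= hash >> 8;
 *	return hash >> 1;
 *
 * The final folds spread entropy from the high bits into the low bits that
 * callers reduce modulo the slave count.
 */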

/*-------------------------- Device entry points ----------------------------*/

void bond_work_init_all(struct bonding *bond)
{
	INIT_DELAYED_WORK(&bond->mcast_work,
			  bond_resend_igmp_join_requests_delayed);
	INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
	INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
	INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);
	INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
	INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
}

static void bond_work_cancel_all(struct bonding *bond)
{
	cancel_delayed_work_sync(&bond->mii_work);
	cancel_delayed_work_sync(&bond->arp_work);
	cancel_delayed_work_sync(&bond->alb_work);
	cancel_delayed_work_sync(&bond->ad_work);
	cancel_delayed_work_sync(&bond->mcast_work);
	cancel_delayed_work_sync(&bond->slave_arr_work);
}

static int bond_open(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	/* reset slave->backup and slave->inactive */
	if (bond_has_slaves(bond)) {
		bond_for_each_slave(bond, slave, iter) {
			if (bond_uses_primary(bond) &&
			    slave != rcu_access_pointer(bond->curr_active_slave)) {
				bond_set_slave_inactive_flags(slave,
							      BOND_SLAVE_NOTIFY_NOW);
			} else if (BOND_MODE(bond) != BOND_MODE_8023AD) {
				bond_set_slave_active_flags(slave,
							    BOND_SLAVE_NOTIFY_NOW);
			}
		}
	}

	if (bond_is_lb(bond)) {
		/* bond_alb_initialize must be called before the timer
		 * is started.
		 */
		if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
			return -ENOMEM;
		if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB)
			queue_delayed_work(bond->wq, &bond->alb_work, 0);
	}

	if (bond->params.miimon)  /* link check interval, in milliseconds. */
		queue_delayed_work(bond->wq, &bond->mii_work, 0);

	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
		queue_delayed_work(bond->wq, &bond->arp_work, 0);
		bond->recv_probe = bond_arp_rcv;
	}

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		queue_delayed_work(bond->wq, &bond->ad_work, 0);
		/* register to receive LACPDUs */
		bond->recv_probe = bond_3ad_lacpdu_recv;
		bond_3ad_initiate_agg_selection(bond, 1);
	}

	if (bond_mode_can_use_xmit_hash(bond))
		bond_update_slave_arr(bond, NULL);

	return 0;
}

static int bond_close(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	bond_work_cancel_all(bond);
	bond->send_peer_notif = 0;
	if (bond_is_lb(bond))
		bond_alb_deinitialize(bond);
	bond->recv_probe = NULL;

	return 0;
}

/* fold stats, assuming all rtnl_link_stats64 fields are u64, but
 * that some drivers can provide 32bit values only.
 */
static void bond_fold_stats(struct rtnl_link_stats64 *_res,
			    const struct rtnl_link_stats64 *_new,
			    const struct rtnl_link_stats64 *_old)
{
	const u64 *new = (const u64 *)_new;
	const u64 *old = (const u64 *)_old;
	u64 *res = (u64 *)_res;
	int i;

	for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
		u64 nv = new[i];
		u64 ov = old[i];
		s64 delta = nv - ov;

		/* detects if this particular field is 32bit only */
		if (((nv | ov) >> 32) == 0)
			delta = (s64)(s32)((u32)nv - (u32)ov);

		/* filter anomalies, some drivers reset their stats
		 * at down/up events.
		 */
		if (delta > 0)
			res[i] += delta;
	}
}
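
/* Illustrative example of the 32-bit handling above: if a slave driver only
 * keeps 32-bit counters and one wraps from 0xffffff00 to 0x00000010, both
 * nv and ov still fit in 32 bits, so delta becomes
 * (s64)(s32)((u32)0x10 - (u32)0xffffff00) = 272.  Without that detection
 * the u64 subtraction would yield a large negative delta, which the
 * anomaly filter below would simply drop and the increment would be lost.
 */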

static int bond_get_nest_level(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	return bond->nest_level;
}

static void bond_get_stats(struct net_device *bond_dev,
			   struct rtnl_link_stats64 *stats)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct rtnl_link_stats64 temp;
	struct list_head *iter;
	struct slave *slave;

	spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev));
	memcpy(stats, &bond->bond_stats, sizeof(*stats));

	rcu_read_lock();
	bond_for_each_slave_rcu(bond, slave, iter) {
		const struct rtnl_link_stats64 *new =
			dev_get_stats(slave->dev, &temp);

		bond_fold_stats(stats, new, &slave->slave_stats);

		/* save off the slave stats for the next run */
		memcpy(&slave->slave_stats, new, sizeof(*new));
	}
	rcu_read_unlock();

	memcpy(&bond->bond_stats, stats, sizeof(*stats));
	spin_unlock(&bond->stats_lock);
}

static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
{
3473
	struct bonding *bond = netdev_priv(bond_dev);
L
Linus Torvalds 已提交
3474 3475 3476 3477 3478 3479
	struct net_device *slave_dev = NULL;
	struct ifbond k_binfo;
	struct ifbond __user *u_binfo = NULL;
	struct ifslave k_sinfo;
	struct ifslave __user *u_sinfo = NULL;
	struct mii_ioctl_data *mii = NULL;
3480
	struct bond_opt_value newval;
3481
	struct net *net;
L
Linus Torvalds 已提交
3482 3483
	int res = 0;

3484
	netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);
L
Linus Torvalds 已提交
3485 3486 3487 3488

	switch (cmd) {
	case SIOCGMIIPHY:
		mii = if_mii(ifr);
S
Stephen Hemminger 已提交
3489
		if (!mii)
L
Linus Torvalds 已提交
3490
			return -EINVAL;
S
Stephen Hemminger 已提交
3491

L
Linus Torvalds 已提交
3492 3493 3494
		mii->phy_id = 0;
		/* Fall Through */
	case SIOCGMIIREG:
3495
		/* We do this again just in case we were called by SIOCGMIIREG
L
Linus Torvalds 已提交
3496 3497 3498
		 * instead of SIOCGMIIPHY.
		 */
		mii = if_mii(ifr);
S
Stephen Hemminger 已提交
3499
		if (!mii)
L
Linus Torvalds 已提交
3500
			return -EINVAL;
S
Stephen Hemminger 已提交
3501

L
Linus Torvalds 已提交
3502 3503
		if (mii->reg_num == 1) {
			mii->val_out = 0;
S
Stephen Hemminger 已提交
3504
			if (netif_carrier_ok(bond->dev))
L
Linus Torvalds 已提交
3505 3506 3507 3508 3509 3510 3511 3512
				mii->val_out = BMSR_LSTATUS;
		}

		return 0;
	case BOND_INFO_QUERY_OLD:
	case SIOCBONDINFOQUERY:
		u_binfo = (struct ifbond __user *)ifr->ifr_data;

S
Stephen Hemminger 已提交
3513
		if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
L
Linus Torvalds 已提交
3514 3515
			return -EFAULT;

3516 3517
		bond_info_query(bond_dev, &k_binfo);
		if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
S
Stephen Hemminger 已提交
3518
			return -EFAULT;
L
Linus Torvalds 已提交
3519

3520
		return 0;
L
Linus Torvalds 已提交
3521 3522 3523 3524
	case BOND_SLAVE_INFO_QUERY_OLD:
	case SIOCBONDSLAVEINFOQUERY:
		u_sinfo = (struct ifslave __user *)ifr->ifr_data;

S
Stephen Hemminger 已提交
3525
		if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave)))
L
Linus Torvalds 已提交
3526 3527 3528
			return -EFAULT;

		res = bond_slave_info_query(bond_dev, &k_sinfo);
S
Stephen Hemminger 已提交
3529 3530 3531
		if (res == 0 &&
		    copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave)))
			return -EFAULT;
L
Linus Torvalds 已提交
3532 3533 3534 3535 3536 3537

		return res;
	default:
		break;
	}

3538 3539 3540
	net = dev_net(bond_dev);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
L
Linus Torvalds 已提交
3541 3542
		return -EPERM;

3543
	slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
L
Linus Torvalds 已提交
3544

3545
	netdev_dbg(bond_dev, "slave_dev=%p:\n", slave_dev);
L
Linus Torvalds 已提交
3546

S
Stephen Hemminger 已提交
3547
	if (!slave_dev)
3548
		return -ENODEV;
L
Linus Torvalds 已提交
3549

3550
	netdev_dbg(bond_dev, "slave_dev->name=%s:\n", slave_dev->name);
3551 3552 3553
	switch (cmd) {
	case BOND_ENSLAVE_OLD:
	case SIOCBONDENSLAVE:
D
David Ahern 已提交
3554
		res = bond_enslave(bond_dev, slave_dev, NULL);
3555 3556 3557 3558 3559 3560 3561
		break;
	case BOND_RELEASE_OLD:
	case SIOCBONDRELEASE:
		res = bond_release(bond_dev, slave_dev);
		break;
	case BOND_SETHWADDR_OLD:
	case SIOCBONDSETHWADDR:
3562
		res = bond_set_dev_addr(bond_dev, slave_dev);
3563 3564 3565
		break;
	case BOND_CHANGE_ACTIVE_OLD:
	case SIOCBONDCHANGEACTIVE:
3566
		bond_opt_initstr(&newval, slave_dev->name);
3567 3568
		res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE,
					    &newval);
3569 3570 3571
		break;
	default:
		res = -EOPNOTSUPP;
L
Linus Torvalds 已提交
3572 3573 3574 3575 3576
	}

	return res;
}

3577
static void bond_change_rx_flags(struct net_device *bond_dev, int change)
L
Linus Torvalds 已提交
3578
{
3579
	struct bonding *bond = netdev_priv(bond_dev);
L
Linus Torvalds 已提交
3580

3581 3582 3583
	if (change & IFF_PROMISC)
		bond_set_promiscuity(bond,
				     bond_dev->flags & IFF_PROMISC ? 1 : -1);
S
Stephen Hemminger 已提交
3584

3585 3586 3587 3588
	if (change & IFF_ALLMULTI)
		bond_set_allmulti(bond,
				  bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
}
L
Linus Torvalds 已提交
3589

3590
static void bond_set_rx_mode(struct net_device *bond_dev)
3591 3592
{
	struct bonding *bond = netdev_priv(bond_dev);
3593
	struct list_head *iter;
3594
	struct slave *slave;
L
Linus Torvalds 已提交
3595

3596
	rcu_read_lock();
3597
	if (bond_uses_primary(bond)) {
3598
		slave = rcu_dereference(bond->curr_active_slave);
3599 3600 3601 3602 3603
		if (slave) {
			dev_uc_sync(slave->dev, bond_dev);
			dev_mc_sync(slave->dev, bond_dev);
		}
	} else {
3604
		bond_for_each_slave_rcu(bond, slave, iter) {
3605 3606 3607
			dev_uc_sync_multiple(slave->dev, bond_dev);
			dev_mc_sync_multiple(slave->dev, bond_dev);
		}
L
Linus Torvalds 已提交
3608
	}
3609
	rcu_read_unlock();
L
Linus Torvalds 已提交
3610 3611
}

3612
static int bond_neigh_init(struct neighbour *n)
3613
{
3614 3615 3616
	struct bonding *bond = netdev_priv(n->dev);
	const struct net_device_ops *slave_ops;
	struct neigh_parms parms;
3617
	struct slave *slave;
3618 3619
	int ret;

3620
	slave = bond_first_slave(bond);
3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632
	if (!slave)
		return 0;
	slave_ops = slave->dev->netdev_ops;
	if (!slave_ops->ndo_neigh_setup)
		return 0;

	parms.neigh_setup = NULL;
	parms.neigh_cleanup = NULL;
	ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
	if (ret)
		return ret;

3633
	/* Assign slave's neigh_cleanup to neighbour in case cleanup is called
3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645
	 * after the last slave has been detached.  Assumes that all slaves
	 * utilize the same neigh_cleanup (true at this writing as only user
	 * is ipoib).
	 */
	n->parms->neigh_cleanup = parms.neigh_cleanup;

	if (!parms.neigh_setup)
		return 0;

	return parms.neigh_setup(n);
}

3646
/* The bonding ndo_neigh_setup is called at init time before any
3647 3648
 * slave exists. So we must declare proxy setup function which will
 * be used at run time to resolve the actual slave neigh param setup.
3649 3650 3651 3652
 *
 * It's also called by master devices (such as vlans) to setup their
 * underlying devices. In that case - do nothing, we're already set up from
 * our init.
3653 3654 3655 3656
 */
static int bond_neigh_setup(struct net_device *dev,
			    struct neigh_parms *parms)
{
3657 3658 3659
	/* modify only our neigh_parms */
	if (parms->dev == dev)
		parms->neigh_setup = bond_neigh_init;
3660 3661 3662 3663

	return 0;
}
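
/* In practice (illustrative): neighbour entries created on top of the bond
 * run bond_neigh_init(), which forwards to the first slave's
 * ndo_neigh_setup, so a slave driver such as ipoib still gets to set up its
 * per-neighbour private data even though no slave existed when the bond's
 * own neigh_parms were initialized.
 */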

/* Change the MTU of all of a master's slaves to match the master */
L
Linus Torvalds 已提交
3665 3666
static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
{
3667
	struct bonding *bond = netdev_priv(bond_dev);
3668
	struct slave *slave, *rollback_slave;
3669
	struct list_head *iter;
L
Linus Torvalds 已提交
3670 3671
	int res = 0;

3672
	netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu);
L
Linus Torvalds 已提交
3673

3674
	bond_for_each_slave(bond, slave, iter) {
3675 3676
		netdev_dbg(bond_dev, "s %p c_m %p\n",
			   slave, slave->dev->netdev_ops->ndo_change_mtu);
3677

L
Linus Torvalds 已提交
3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688
		res = dev_set_mtu(slave->dev, new_mtu);

		if (res) {
			/* If we failed to set the slave's mtu to the new value
			 * we must abort the operation even in ACTIVE_BACKUP
			 * mode, because if we allow the backup slaves to have
			 * different mtu values than the active slave we'll
			 * need to change their mtu when doing a failover. That
			 * means changing their mtu from timer context, which
			 * is probably not a good idea.
			 */
3689 3690
			netdev_dbg(bond_dev, "err %d %s\n", res,
				   slave->dev->name);
L
Linus Torvalds 已提交
3691 3692 3693 3694 3695 3696 3697 3698 3699 3700
			goto unwind;
		}
	}

	bond_dev->mtu = new_mtu;

	return 0;

unwind:
	/* unwind from head to the slave that failed */
3701
	bond_for_each_slave(bond, rollback_slave, iter) {
L
Linus Torvalds 已提交
3702 3703
		int tmp_res;

3704 3705 3706 3707
		if (rollback_slave == slave)
			break;

		tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
L
Linus Torvalds 已提交
3708
		if (tmp_res) {
3709 3710
			netdev_dbg(bond_dev, "unwind err %d dev %s\n",
				   tmp_res, rollback_slave->dev->name);
L
Linus Torvalds 已提交
3711 3712 3713 3714 3715 3716
		}
	}

	return res;
}

3717
/* Change HW address
L
Linus Torvalds 已提交
3718 3719 3720 3721 3722 3723 3724
 *
 * Note that many devices must be down to change the HW address, and
 * downing the master releases all slaves.  We can make bonds full of
 * bonding devices to test this, however.
 */
static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
{
3725
	struct bonding *bond = netdev_priv(bond_dev);
3726
	struct slave *slave, *rollback_slave;
3727
	struct sockaddr_storage *ss = addr, tmp_ss;
3728
	struct list_head *iter;
L
Linus Torvalds 已提交
3729 3730
	int res = 0;

3731
	if (BOND_MODE(bond) == BOND_MODE_ALB)
3732 3733 3734
		return bond_alb_set_mac_address(bond_dev, addr);


3735
	netdev_dbg(bond_dev, "bond=%p\n", bond);
L
Linus Torvalds 已提交
3736

3737 3738
	/* If fail_over_mac is enabled, do nothing and return success.
	 * Returning an error causes ifenslave to fail.
3739
	 */
3740
	if (bond->params.fail_over_mac &&
3741
	    BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
3742
		return 0;
3743

3744
	if (!is_valid_ether_addr(ss->__data))
L
Linus Torvalds 已提交
3745 3746
		return -EADDRNOTAVAIL;

3747
	bond_for_each_slave(bond, slave, iter) {
3748
		netdev_dbg(bond_dev, "slave %p %s\n", slave, slave->dev->name);
3749
		res = dev_set_mac_address(slave->dev, addr, NULL);
L
Linus Torvalds 已提交
3750 3751 3752 3753 3754 3755 3756
		if (res) {
			/* TODO: consider downing the slave
			 * and retry ?
			 * User should expect communications
			 * breakage anyway until ARP finish
			 * updating, so...
			 */
3757
			netdev_dbg(bond_dev, "err %d %s\n", res, slave->dev->name);
L
Linus Torvalds 已提交
3758 3759 3760 3761 3762
			goto unwind;
		}
	}

	/* success */
3763
	memcpy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);
L
Linus Torvalds 已提交
3764 3765 3766
	return 0;

unwind:
3767 3768
	memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
	tmp_ss.ss_family = bond_dev->type;
L
Linus Torvalds 已提交
3769 3770

	/* unwind from head to the slave that failed */
3771
	bond_for_each_slave(bond, rollback_slave, iter) {
L
Linus Torvalds 已提交
3772 3773
		int tmp_res;

3774 3775 3776
		if (rollback_slave == slave)
			break;

3777
		tmp_res = dev_set_mac_address(rollback_slave->dev,
3778
					      (struct sockaddr *)&tmp_ss, NULL);
L
Linus Torvalds 已提交
3779
		if (tmp_res) {
3780 3781
			netdev_dbg(bond_dev, "unwind err %d dev %s\n",
				   tmp_res, rollback_slave->dev->name);
L
Linus Torvalds 已提交
3782 3783 3784 3785 3786 3787
		}
	}

	return res;
}

3788 3789 3790 3791 3792 3793 3794 3795 3796 3797
/**
 * bond_xmit_slave_id - transmit skb through slave with slave_id
 * @bond: bonding device that is transmitting
 * @skb: buffer to transmit
 * @slave_id: slave id up to slave_cnt-1 through which to transmit
 *
 * This function tries to transmit through slave with slave_id but in case
 * it fails, it tries to find the first available slave for transmission.
 * The skb is consumed in all cases, thus the function is void.
 */
3798
static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
3799
{
3800
	struct list_head *iter;
3801 3802 3803 3804
	struct slave *slave;
	int i = slave_id;

	/* Here we start from the slave with slave_id */
3805
	bond_for_each_slave_rcu(bond, slave, iter) {
3806
		if (--i < 0) {
3807
			if (bond_slave_can_tx(slave)) {
3808 3809 3810 3811 3812 3813 3814 3815
				bond_dev_queue_xmit(bond, skb, slave->dev);
				return;
			}
		}
	}

	/* Here we start from the first slave up to slave_id */
	i = slave_id;
3816
	bond_for_each_slave_rcu(bond, slave, iter) {
3817 3818
		if (--i < 0)
			break;
3819
		if (bond_slave_can_tx(slave)) {
3820 3821 3822 3823 3824
			bond_dev_queue_xmit(bond, skb, slave->dev);
			return;
		}
	}
	/* no slave that can tx has been found */
E
Eric Dumazet 已提交
3825
	bond_tx_drop(bond->dev, skb);
3826 3827
}
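
/* Example (illustrative): the round-robin caller passes slave_id already
 * reduced modulo the slave count, so id 7 on a 3-slave bond arrives as 1
 * and the first loop starts looking at the second slave; if that slave and
 * the ones after it cannot transmit, the second loop wraps around and scans
 * from the head of the list, so the skb is dropped only when no slave at
 * all can transmit.
 */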

/**
 * bond_rr_gen_slave_id - generate slave id based on packets_per_slave
 * @bond: bonding device to use
 *
 * Based on the value of the bonding device's packets_per_slave parameter
 * this function generates a slave id, which is usually used as the next
 * slave to transmit through.
 */
static u32 bond_rr_gen_slave_id(struct bonding *bond)
{
	u32 slave_id;
3839 3840
	struct reciprocal_value reciprocal_packets_per_slave;
	int packets_per_slave = bond->params.packets_per_slave;
3841 3842 3843 3844 3845 3846 3847 3848 3849

	switch (packets_per_slave) {
	case 0:
		slave_id = prandom_u32();
		break;
	case 1:
		slave_id = bond->rr_tx_counter;
		break;
	default:
3850 3851
		reciprocal_packets_per_slave =
			bond->params.reciprocal_packets_per_slave;
3852
		slave_id = reciprocal_divide(bond->rr_tx_counter,
3853
					     reciprocal_packets_per_slave);
3854 3855 3856 3857 3858 3859 3860
		break;
	}
	bond->rr_tx_counter++;

	return slave_id;
}
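
/* Illustrative behaviour: with packets_per_slave == N > 1 the counter is
 * divided by N via the precomputed reciprocal, so the same id is returned
 * for N consecutive packets before moving on (N == 3 gives 0,0,0,1,1,1,...).
 * packets_per_slave == 1 rotates on every packet, and 0 picks a
 * pseudo-random slave per packet.
 */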

static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
					struct net_device *bond_dev)
L
Linus Torvalds 已提交
3863
{
3864
	struct bonding *bond = netdev_priv(bond_dev);
3865
	struct iphdr *iph = ip_hdr(skb);
3866
	struct slave *slave;
3867
	u32 slave_id;
L
Linus Torvalds 已提交
3868

3869
	/* Start with the curr_active_slave that joined the bond as the
3870 3871 3872 3873
	 * default for sending IGMP traffic.  For failover purposes one
	 * needs to maintain some consistency for the interface that will
	 * send the join/membership reports.  The curr_active_slave found
	 * will send all of this type of traffic.
3874
	 */
3875
	if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
3876
		slave = rcu_dereference(bond->curr_active_slave);
3877
		if (slave)
3878 3879 3880
			bond_dev_queue_xmit(bond, skb, slave->dev);
		else
			bond_xmit_slave_id(bond, skb, 0);
3881
	} else {
3882
		int slave_cnt = READ_ONCE(bond->slave_cnt);
3883 3884 3885 3886 3887

		if (likely(slave_cnt)) {
			slave_id = bond_rr_gen_slave_id(bond);
			bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
		} else {
E
Eric Dumazet 已提交
3888
			bond_tx_drop(bond_dev, skb);
3889
		}
L
Linus Torvalds 已提交
3890
	}
3891

3892
	return NETDEV_TX_OK;
L
Linus Torvalds 已提交
3893 3894
}

3895
/* In active-backup mode, we know that bond->curr_active_slave is always valid if
L
Linus Torvalds 已提交
3896 3897
 * the bond has a usable interface.
 */
3898 3899
static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
					  struct net_device *bond_dev)
L
Linus Torvalds 已提交
3900
{
3901
	struct bonding *bond = netdev_priv(bond_dev);
3902
	struct slave *slave;
L
Linus Torvalds 已提交
3903

3904
	slave = rcu_dereference(bond->curr_active_slave);
3905
	if (slave)
3906 3907
		bond_dev_queue_xmit(bond, skb, slave->dev);
	else
E
Eric Dumazet 已提交
3908
		bond_tx_drop(bond_dev, skb);
3909

3910
	return NETDEV_TX_OK;
L
Linus Torvalds 已提交
3911 3912
}

3913 3914 3915
/* Use this to update slave_array when (a) it's not appropriate to update
 * slave_array right away (note that update_slave_array() may sleep)
 * and / or (b) RTNL is not held.
L
Linus Torvalds 已提交
3916
 */
3917
void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay)
L
Linus Torvalds 已提交
3918
{
3919 3920
	queue_delayed_work(bond->wq, &bond->slave_arr_work, delay);
}
L
Linus Torvalds 已提交
3921

3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947
/* Slave array work handler. Holds only RTNL */
static void bond_slave_arr_handler(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    slave_arr_work.work);
	int ret;

	if (!rtnl_trylock())
		goto err;

	ret = bond_update_slave_arr(bond, NULL);
	rtnl_unlock();
	if (ret) {
		pr_warn_ratelimited("Failed to update slave array from WT\n");
		goto err;
	}
	return;

err:
	bond_slave_arr_work_rearm(bond, 1);
}

/* Build the usable slaves array in control path for modes that use xmit-hash
 * to determine the slave interface -
 * (a) BOND_MODE_8023AD
 * (b) BOND_MODE_XOR
 * (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0
 *
 * The caller is expected to hold RTNL only and NO other lock!
 */
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
{
	struct slave *slave;
	struct list_head *iter;
	struct bond_up_slave *new_arr, *old_arr;
	int agg_id = 0;
	int ret = 0;

#ifdef CONFIG_LOCKDEP
	WARN_ON(lockdep_is_held(&bond->mode_lock));
#endif

	new_arr = kzalloc(offsetof(struct bond_up_slave, arr[bond->slave_cnt]),
			  GFP_KERNEL);
	if (!new_arr) {
		ret = -ENOMEM;
		pr_err("Failed to build slave-array.\n");
		goto out;
	}
	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		struct ad_info ad_info;

		if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
			pr_debug("bond_3ad_get_active_agg_info failed\n");
			kfree_rcu(new_arr, rcu);
			/* No active aggregator means it's not safe to use
			 * the previous array.
			 */
			old_arr = rtnl_dereference(bond->slave_arr);
			if (old_arr) {
				RCU_INIT_POINTER(bond->slave_arr, NULL);
				kfree_rcu(old_arr, rcu);
			}
			goto out;
		}
		agg_id = ad_info.aggregator_id;
	}
	bond_for_each_slave(bond, slave, iter) {
		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			struct aggregator *agg;

			agg = SLAVE_AD_INFO(slave)->port.aggregator;
			if (!agg || agg->aggregator_identifier != agg_id)
				continue;
		}
		if (!bond_slave_can_tx(slave))
			continue;
		if (skipslave == slave)
			continue;

		netdev_dbg(bond->dev,
			   "Adding slave dev %s to tx hash array[%d]\n",
			   slave->dev->name, new_arr->count);

4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041
		new_arr->arr[new_arr->count++] = slave;
	}

	old_arr = rtnl_dereference(bond->slave_arr);
	rcu_assign_pointer(bond->slave_arr, new_arr);
	if (old_arr)
		kfree_rcu(old_arr, rcu);
out:
	if (ret != 0 && skipslave) {
		int idx;

		/* Rare situation where caller has asked to skip a specific
		 * slave but allocation failed (most likely!). BTW this is
		 * only possible when the call is initiated from
		 * __bond_release_one(). In this situation; overwrite the
		 * skipslave entry in the array with the last entry from the
		 * array to avoid a situation where the xmit path may choose
		 * this to-be-skipped slave to send a packet out.
		 */
		old_arr = rtnl_dereference(bond->slave_arr);
		for (idx = 0; idx < old_arr->count; idx++) {
			if (skipslave == old_arr->arr[idx]) {
				old_arr->arr[idx] =
				    old_arr->arr[old_arr->count-1];
				old_arr->count--;
				break;
			}
		}
	}
	return ret;
}
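
/* Usage sketch (illustrative): the fast path in bond_3ad_xor_xmit() only
 * does
 *
 *	slaves = rcu_dereference(bond->slave_arr);
 *	slave  = slaves->arr[bond_xmit_hash(bond, skb) % count];
 *
 * so all filtering (active aggregator, link state, skipslave) happens here
 * under RTNL and the transmit path never walks the full slave list.
 */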

/* Use this Xmit function for 3AD as well as XOR modes. The current
 * usable slave array is formed in the control path. The xmit function
 * just calculates hash and sends the packet out.
 */
static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct bonding *bond = netdev_priv(dev);
	struct slave *slave;
	struct bond_up_slave *slaves;
	unsigned int count;

	slaves = rcu_dereference(bond->slave_arr);
	count = slaves ? READ_ONCE(slaves->count) : 0;
	if (likely(count)) {
		slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
		bond_dev_queue_xmit(bond, skb, slave->dev);
	} else {
		bond_tx_drop(dev, skb);
	}

	return NETDEV_TX_OK;
}

4062
/* in broadcast mode, we send everything to all usable interfaces. */
4063 4064
static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
				       struct net_device *bond_dev)
L
Linus Torvalds 已提交
4065
{
4066
	struct bonding *bond = netdev_priv(bond_dev);
4067
	struct slave *slave = NULL;
4068
	struct list_head *iter;
L
Linus Torvalds 已提交
4069

4070
	bond_for_each_slave_rcu(bond, slave, iter) {
4071 4072
		if (bond_is_last_slave(bond, slave))
			break;
4073
		if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
4074
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
L
Linus Torvalds 已提交
4075

4076
			if (!skb2) {
4077 4078
				net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
						    bond_dev->name, __func__);
4079
				continue;
L
Linus Torvalds 已提交
4080
			}
4081
			bond_dev_queue_xmit(bond, skb2, slave->dev);
L
Linus Torvalds 已提交
4082 4083
		}
	}
4084
	if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
4085 4086
		bond_dev_queue_xmit(bond, skb, slave->dev);
	else
E
Eric Dumazet 已提交
4087
		bond_tx_drop(bond_dev, skb);
S
Stephen Hemminger 已提交
4088

4089
	return NETDEV_TX_OK;
L
Linus Torvalds 已提交
4090 4091 4092 4093
}

/*------------------------- Device initialization ---------------------------*/

4094
/* Lookup the slave that corresponds to a qid */
4095 4096 4097 4098
static inline int bond_slave_override(struct bonding *bond,
				      struct sk_buff *skb)
{
	struct slave *slave = NULL;
4099
	struct list_head *iter;
4100

4101
	if (!skb_rx_queue_recorded(skb))
4102
		return 1;
4103 4104

	/* Find out if any slaves have the same mapping as this skb. */
4105
	bond_for_each_slave_rcu(bond, slave, iter) {
4106
		if (slave->queue_id == skb_get_queue_mapping(skb)) {
4107 4108
			if (bond_slave_is_up(slave) &&
			    slave->link == BOND_LINK_UP) {
4109 4110 4111 4112
				bond_dev_queue_xmit(bond, skb, slave->dev);
				return 0;
			}
			/* If the slave isn't UP, use default transmit policy. */
4113 4114 4115 4116
			break;
		}
	}

4117
	return 1;
4118 4119
}

4120

4121
static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
4122 4123
			     struct net_device *sb_dev,
			     select_queue_fallback_t fallback)
4124
{
4125
	/* This helper function exists to help dev_pick_tx get the correct
P
Phil Oester 已提交
4126
	 * destination queue.  Using a helper function skips a call to
4127 4128 4129
	 * skb_tx_hash and will put the skbs in the queue we expect on their
	 * way down to the bonding driver.
	 */
P
Phil Oester 已提交
4130 4131
	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

4132
	/* Save the original txq to restore before passing to the driver */
4133
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb);
4134

P
Phil Oester 已提交
4135
	if (unlikely(txq >= dev->real_num_tx_queues)) {
4136
		do {
P
Phil Oester 已提交
4137
			txq -= dev->real_num_tx_queues;
4138
		} while (txq >= dev->real_num_tx_queues);
P
Phil Oester 已提交
4139 4140
	}
	return txq;
4141 4142
}
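
/* Illustrative flow (assuming the documented tc-based steering): traffic
 * marked with "tc filter ... action skbedit queue_mapping 2" keeps that
 * mapping when __bond_start_xmit() runs, where bond_slave_override()
 * compares it against each slave's queue_id; the value stashed in
 * qdisc_skb_cb() above lets bond_dev_queue_xmit() restore the original
 * mapping before the skb is handed to the chosen slave.
 */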

static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
4144
{
4145 4146
	struct bonding *bond = netdev_priv(dev);

4147 4148 4149
	if (bond_should_override_tx_queue(bond) &&
	    !bond_slave_override(bond, skb))
		return NETDEV_TX_OK;
4150

4151
	switch (BOND_MODE(bond)) {
4152 4153 4154 4155
	case BOND_MODE_ROUNDROBIN:
		return bond_xmit_roundrobin(skb, dev);
	case BOND_MODE_ACTIVEBACKUP:
		return bond_xmit_activebackup(skb, dev);
4156
	case BOND_MODE_8023AD:
4157
	case BOND_MODE_XOR:
4158
		return bond_3ad_xor_xmit(skb, dev);
4159 4160 4161 4162
	case BOND_MODE_BROADCAST:
		return bond_xmit_broadcast(skb, dev);
	case BOND_MODE_ALB:
		return bond_alb_xmit(skb, dev);
4163 4164
	case BOND_MODE_TLB:
		return bond_tlb_xmit(skb, dev);
4165 4166
	default:
		/* Should never happen, mode already checked */
4167
		netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
4168
		WARN_ON_ONCE(1);
E
Eric Dumazet 已提交
4169
		bond_tx_drop(dev, skb);
4170 4171 4172 4173
		return NETDEV_TX_OK;
	}
}

4174 4175 4176 4177 4178
static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bonding *bond = netdev_priv(dev);
	netdev_tx_t ret = NETDEV_TX_OK;

4179
	/* If we risk deadlock from transmitting this in the
4180 4181
	 * netpoll path, tell netpoll to queue the frame for later tx
	 */
4182
	if (unlikely(is_netpoll_tx_blocked(dev)))
4183 4184
		return NETDEV_TX_BUSY;

4185
	rcu_read_lock();
4186
	if (bond_has_slaves(bond))
4187 4188
		ret = __bond_start_xmit(skb, dev);
	else
E
Eric Dumazet 已提交
4189
		bond_tx_drop(dev, skb);
4190
	rcu_read_unlock();
4191 4192 4193

	return ret;
}
4194

4195 4196
static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
					   struct ethtool_link_ksettings *cmd)
4197 4198 4199
{
	struct bonding *bond = netdev_priv(bond_dev);
	unsigned long speed = 0;
4200
	struct list_head *iter;
4201
	struct slave *slave;
4202

4203 4204
	cmd->base.duplex = DUPLEX_UNKNOWN;
	cmd->base.port = PORT_OTHER;
4205

4206
	/* Since bond_slave_can_tx returns false for all inactive or down slaves, we
4207 4208 4209 4210
	 * do not need to check mode.  Though link speed might not represent
	 * the true receive or transmit bandwidth (not all modes are symmetric)
	 * this is an accurate maximum.
	 */
4211
	bond_for_each_slave(bond, slave, iter) {
4212
		if (bond_slave_can_tx(slave)) {
4213 4214
			if (slave->speed != SPEED_UNKNOWN)
				speed += slave->speed;
4215
			if (cmd->base.duplex == DUPLEX_UNKNOWN &&
4216
			    slave->duplex != DUPLEX_UNKNOWN)
4217
				cmd->base.duplex = slave->duplex;
4218 4219
		}
	}
4220
	cmd->base.speed = speed ? : SPEED_UNKNOWN;
4221

4222 4223 4224
	return 0;
}

4225
static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
4226
				     struct ethtool_drvinfo *drvinfo)
4227
{
4228 4229 4230 4231
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
		 BOND_ABI_VERSION);
4232 4233
}

4234
static const struct ethtool_ops bond_ethtool_ops = {
4235
	.get_drvinfo		= bond_ethtool_get_drvinfo,
4236
	.get_link		= ethtool_op_get_link,
4237
	.get_link_ksettings	= bond_ethtool_get_link_ksettings,
4238 4239
};

4240
static const struct net_device_ops bond_netdev_ops = {
4241
	.ndo_init		= bond_init,
S
Stephen Hemminger 已提交
4242
	.ndo_uninit		= bond_uninit,
4243 4244
	.ndo_open		= bond_open,
	.ndo_stop		= bond_close,
4245
	.ndo_start_xmit		= bond_start_xmit,
4246
	.ndo_select_queue	= bond_select_queue,
4247
	.ndo_get_stats64	= bond_get_stats,
4248
	.ndo_do_ioctl		= bond_do_ioctl,
4249
	.ndo_change_rx_flags	= bond_change_rx_flags,
4250
	.ndo_set_rx_mode	= bond_set_rx_mode,
4251
	.ndo_change_mtu		= bond_change_mtu,
J
Jiri Pirko 已提交
4252
	.ndo_set_mac_address	= bond_set_mac_address,
4253
	.ndo_neigh_setup	= bond_neigh_setup,
J
Jiri Pirko 已提交
4254
	.ndo_vlan_rx_add_vid	= bond_vlan_rx_add_vid,
4255
	.ndo_vlan_rx_kill_vid	= bond_vlan_rx_kill_vid,
4256
	.ndo_get_lock_subclass  = bond_get_nest_level,
4257
#ifdef CONFIG_NET_POLL_CONTROLLER
4258
	.ndo_netpoll_setup	= bond_netpoll_setup,
4259 4260 4261
	.ndo_netpoll_cleanup	= bond_netpoll_cleanup,
	.ndo_poll_controller	= bond_poll_controller,
#endif
J
Jiri Pirko 已提交
4262 4263
	.ndo_add_slave		= bond_enslave,
	.ndo_del_slave		= bond_release,
4264
	.ndo_fix_features	= bond_fix_features,
4265
	.ndo_features_check	= passthru_features_check,
4266 4267
};

4268 4269 4270 4271
static const struct device_type bond_type = {
	.name = "bond",
};

4272 4273 4274 4275 4276 4277 4278
static void bond_destructor(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	if (bond->wq)
		destroy_workqueue(bond->wq);
}

4279
void bond_setup(struct net_device *bond_dev)
L
Linus Torvalds 已提交
4280
{
4281
	struct bonding *bond = netdev_priv(bond_dev);
L
Linus Torvalds 已提交
4282

4283
	spin_lock_init(&bond->mode_lock);
E
Eric Dumazet 已提交
4284
	spin_lock_init(&bond->stats_lock);
4285
	bond->params = bonding_defaults;
L
Linus Torvalds 已提交
4286 4287 4288 4289 4290

	/* Initialize pointers */
	bond->dev = bond_dev;

	/* Initialize the device entry points */
4291
	ether_setup(bond_dev);
W
WANG Cong 已提交
4292
	bond_dev->max_mtu = ETH_MAX_MTU;
4293
	bond_dev->netdev_ops = &bond_netdev_ops;
4294
	bond_dev->ethtool_ops = &bond_ethtool_ops;
L
Linus Torvalds 已提交
4295

4296 4297
	bond_dev->needs_free_netdev = true;
	bond_dev->priv_destructor = bond_destructor;
L
Linus Torvalds 已提交
4298

4299 4300
	SET_NETDEV_DEVTYPE(bond_dev, &bond_type);

L
Linus Torvalds 已提交
4301
	/* Initialize the device options */
4302
	bond_dev->flags |= IFF_MASTER;
4303
	bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
4304
	bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
4305

4306
	/* don't acquire bond device's netif_tx_lock when transmitting */
L
Linus Torvalds 已提交
4307 4308 4309 4310 4311 4312 4313 4314 4315
	bond_dev->features |= NETIF_F_LLTX;

	/* By default, we declare the bond to be fully
	 * VLAN hardware accelerated capable. Special
	 * care is taken in the various xmit functions
	 * when there are slaves that are not hw accel
	 * capable
	 */

4316 4317 4318
	/* Don't allow bond devices to change network namespaces. */
	bond_dev->features |= NETIF_F_NETNS_LOCAL;

4319
	bond_dev->hw_features = BOND_VLAN_FEATURES |
4320 4321 4322
				NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX |
				NETIF_F_HW_VLAN_CTAG_FILTER;
4323

4324
	bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
4325
	bond_dev->features |= bond_dev->hw_features;
L
Linus Torvalds 已提交
4326 4327
}

4328 4329 4330
/* Destroy a bonding device.
 * Must be under rtnl_lock when this function is called.
 */
4331
static void bond_uninit(struct net_device *bond_dev)
J
Jay Vosburgh 已提交
4332
{
4333
	struct bonding *bond = netdev_priv(bond_dev);
4334 4335
	struct list_head *iter;
	struct slave *slave;
4336
	struct bond_up_slave *arr;
J
Jay Vosburgh 已提交
4337

4338 4339
	bond_netpoll_cleanup(bond_dev);

4340
	/* Release the bonded slaves */
4341
	bond_for_each_slave(bond, slave, iter)
4342
		__bond_release_one(bond_dev, slave->dev, true, true);
4343
	netdev_info(bond_dev, "Released all slaves\n");
4344

4345 4346 4347 4348 4349 4350
	arr = rtnl_dereference(bond->slave_arr);
	if (arr) {
		RCU_INIT_POINTER(bond->slave_arr, NULL);
		kfree_rcu(arr, rcu);
	}

J
Jay Vosburgh 已提交
4351 4352
	list_del(&bond->bond_list);

4353
	bond_debug_unregister(bond);
J
Jay Vosburgh 已提交
4354 4355
}

L
Linus Torvalds 已提交
4356 4357 4358 4359
/*------------------------- Module initialization ---------------------------*/

static int bond_check_params(struct bond_params *params)
{
4360
	int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
4361 4362
	struct bond_opt_value newval;
	const struct bond_opt_value *valptr;
4363
	int arp_all_targets_value = 0;
4364
	u16 ad_actor_sys_prio = 0;
4365
	u16 ad_user_port_key = 0;
4366
	__be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 };
4367 4368 4369 4370
	int arp_ip_count;
	int bond_mode	= BOND_MODE_ROUNDROBIN;
	int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
	int lacp_fast = 0;
4371
	int tlb_dynamic_lb;
4372

4373
	/* Convert string parameters. */
L
Linus Torvalds 已提交
4374
	if (mode) {
4375 4376 4377 4378
		bond_opt_initstr(&newval, mode);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
		if (!valptr) {
			pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
L
Linus Torvalds 已提交
4379 4380
			return -EINVAL;
		}
4381
		bond_mode = valptr->value;
L
Linus Torvalds 已提交
4382 4383
	}

4384
	if (xmit_hash_policy) {
4385 4386 4387
		if (bond_mode == BOND_MODE_ROUNDROBIN ||
		    bond_mode == BOND_MODE_ACTIVEBACKUP ||
		    bond_mode == BOND_MODE_BROADCAST) {
J
Joe Perches 已提交
4388
			pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
J
Joe Perches 已提交
4389
				bond_mode_name(bond_mode));
4390
		} else {
4391 4392 4393 4394
			bond_opt_initstr(&newval, xmit_hash_policy);
			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
						&newval);
			if (!valptr) {
J
Joe Perches 已提交
4395
				pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
4396 4397 4398
				       xmit_hash_policy);
				return -EINVAL;
			}
4399
			xmit_hashtype = valptr->value;
4400 4401 4402
		}
	}

L
Linus Torvalds 已提交
4403 4404
	if (lacp_rate) {
		if (bond_mode != BOND_MODE_8023AD) {
J
Joe Perches 已提交
4405 4406
			pr_info("lacp_rate param is irrelevant in mode %s\n",
				bond_mode_name(bond_mode));
L
Linus Torvalds 已提交
4407
		} else {
4408 4409 4410 4411
			bond_opt_initstr(&newval, lacp_rate);
			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
						&newval);
			if (!valptr) {
J
Joe Perches 已提交
4412
				pr_err("Error: Invalid lacp rate \"%s\"\n",
4413
				       lacp_rate);
L
Linus Torvalds 已提交
4414 4415
				return -EINVAL;
			}
4416
			lacp_fast = valptr->value;
L
Linus Torvalds 已提交
4417 4418 4419
		}
	}

4420
	if (ad_select) {
4421
		bond_opt_initstr(&newval, ad_select);
4422 4423 4424 4425
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
					&newval);
		if (!valptr) {
			pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
4426 4427
			return -EINVAL;
		}
4428 4429
		params->ad_select = valptr->value;
		if (bond_mode != BOND_MODE_8023AD)
4430
			pr_warn("ad_select param only affects 802.3ad mode\n");
4431 4432 4433 4434
	} else {
		params->ad_select = BOND_AD_STABLE;
	}

4435
	if (max_bonds < 0) {
4436 4437
		pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
			max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
L
Linus Torvalds 已提交
4438 4439 4440 4441
		max_bonds = BOND_DEFAULT_MAX_BONDS;
	}

	if (miimon < 0) {
4442 4443
		pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			miimon, INT_MAX);
4444
		miimon = 0;
L
Linus Torvalds 已提交
4445 4446 4447
	}

	if (updelay < 0) {
4448 4449
		pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			updelay, INT_MAX);
L
Linus Torvalds 已提交
4450 4451 4452 4453
		updelay = 0;
	}

	if (downdelay < 0) {
4454 4455
		pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			downdelay, INT_MAX);
L
Linus Torvalds 已提交
4456 4457 4458
		downdelay = 0;
	}

4459 4460
	if ((use_carrier != 0) && (use_carrier != 1)) {
		pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
4461
			use_carrier);
L
Linus Torvalds 已提交
4462 4463 4464
		use_carrier = 1;
	}

4465
	if (num_peer_notif < 0 || num_peer_notif > 255) {
4466 4467
		pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
			num_peer_notif);
4468 4469 4470
		num_peer_notif = 1;
	}

4471
	/* reset values for 802.3ad/TLB/ALB */
4472
	if (!bond_mode_uses_arp(bond_mode)) {
L
Linus Torvalds 已提交
4473
		if (!miimon) {
4474 4475
			pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
			pr_warn("Forcing miimon to 100msec\n");
4476
			miimon = BOND_DEFAULT_MIIMON;
L
Linus Torvalds 已提交
4477 4478 4479
		}
	}

4480
	if (tx_queues < 1 || tx_queues > 255) {
4481 4482
		pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
			tx_queues, BOND_DEFAULT_TX_QUEUES);
4483 4484 4485
		tx_queues = BOND_DEFAULT_TX_QUEUES;
	}

4486
	if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
4487 4488
		pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
			all_slaves_active);
4489 4490 4491
		all_slaves_active = 0;
	}

4492
	if (resend_igmp < 0 || resend_igmp > 255) {
4493 4494
		pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
			resend_igmp, BOND_DEFAULT_RESEND_IGMP);
4495 4496 4497
		resend_igmp = BOND_DEFAULT_RESEND_IGMP;
	}

4498 4499
	bond_opt_initval(&newval, packets_per_slave);
	if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
4500 4501 4502 4503 4504
		pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
			packets_per_slave, USHRT_MAX);
		packets_per_slave = 1;
	}

L
Linus Torvalds 已提交
4505
	if (bond_mode == BOND_MODE_ALB) {
J
Joe Perches 已提交
4506 4507
		pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
			  updelay);
L
Linus Torvalds 已提交
4508 4509 4510 4511 4512 4513 4514
	}

	if (!miimon) {
		if (updelay || downdelay) {
			/* just warn the user the up/down delay will have
			 * no effect since miimon is zero...
			 */
4515 4516
			pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
				updelay, downdelay);
L
Linus Torvalds 已提交
4517 4518 4519 4520
		}
	} else {
		/* don't allow arp monitoring */
		if (arp_interval) {
4521 4522
			pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
				miimon, arp_interval);
L
Linus Torvalds 已提交
4523 4524 4525 4526
			arp_interval = 0;
		}

		if ((updelay % miimon) != 0) {
4527 4528
			pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
				updelay, miimon, (updelay / miimon) * miimon);
L
Linus Torvalds 已提交
4529 4530 4531 4532 4533
		}

		updelay /= miimon;

		if ((downdelay % miimon) != 0) {
4534 4535 4536
			pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
				downdelay, miimon,
				(downdelay / miimon) * miimon);
L
Linus Torvalds 已提交
4537 4538 4539 4540 4541 4542
		}

		downdelay /= miimon;
	}

	if (arp_interval < 0) {
4543 4544
		pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			arp_interval, INT_MAX);
4545
		arp_interval = 0;
L
Linus Torvalds 已提交
4546 4547
	}

4548 4549
	for (arp_ip_count = 0, i = 0;
	     (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
4550
		__be32 ip;
4551 4552

		/* not a complete check, but good enough to catch mistakes */
4553
		if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
4554
		    !bond_is_ip_target_ok(ip)) {
4555 4556
			pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
				arp_ip_target[i]);
L
Linus Torvalds 已提交
4557 4558
			arp_interval = 0;
		} else {
4559 4560 4561
			if (bond_get_targets_ip(arp_target, ip) == -1)
				arp_target[arp_ip_count++] = ip;
			else
4562 4563
				pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
					&ip);
L
Linus Torvalds 已提交
4564 4565 4566 4567 4568
		}
	}

	if (arp_interval && !arp_ip_count) {
		/* don't allow arping if no arp_ip_target given... */
4569 4570
		pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
			arp_interval);
L
Linus Torvalds 已提交
4571 4572 4573
		arp_interval = 0;
	}

4574 4575
	if (arp_validate) {
		if (!arp_interval) {
J
Joe Perches 已提交
4576
			pr_err("arp_validate requires arp_interval\n");
4577 4578 4579
			return -EINVAL;
		}

4580 4581 4582 4583
		bond_opt_initstr(&newval, arp_validate);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
					&newval);
		if (!valptr) {
J
Joe Perches 已提交
4584
			pr_err("Error: invalid arp_validate \"%s\"\n",
4585
			       arp_validate);
4586 4587
			return -EINVAL;
		}
4588 4589
		arp_validate_value = valptr->value;
	} else {
4590
		arp_validate_value = 0;
4591
	}
4592

4593
	if (arp_all_targets) {
4594 4595 4596 4597
		bond_opt_initstr(&newval, arp_all_targets);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
					&newval);
		if (!valptr) {
4598 4599 4600
			pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
			       arp_all_targets);
			arp_all_targets_value = 0;
4601 4602
		} else {
			arp_all_targets_value = valptr->value;
4603 4604 4605
		}
	}

L
Linus Torvalds 已提交
4606
	if (miimon) {
J
Joe Perches 已提交
4607
		pr_info("MII link monitoring set to %d ms\n", miimon);
L
Linus Torvalds 已提交
4608
	} else if (arp_interval) {
4609 4610
		valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
					  arp_validate_value);
J
Joe Perches 已提交
4611
		pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
4612
			arp_interval, valptr->string, arp_ip_count);
L
Linus Torvalds 已提交
4613 4614

		for (i = 0; i < arp_ip_count; i++)
J
Joe Perches 已提交
4615
			pr_cont(" %s", arp_ip_target[i]);
L
Linus Torvalds 已提交
4616

J
Joe Perches 已提交
4617
		pr_cont("\n");
L
Linus Torvalds 已提交
4618

4619
	} else if (max_bonds) {
L
Linus Torvalds 已提交
4620 4621 4622
		/* miimon and arp_interval not set, we need one so things
		 * work as expected, see bonding.txt for details
		 */
J
Joe Perches 已提交
4623
		pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
L
Linus Torvalds 已提交
4624 4625
	}

4626
	if (primary && !bond_mode_uses_primary(bond_mode)) {
L
Linus Torvalds 已提交
4627 4628 4629
		/* currently, using a primary only makes sense
		 * in active backup, TLB or ALB modes
		 */
4630 4631
		pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
			primary, bond_mode_name(bond_mode));
L
Linus Torvalds 已提交
4632 4633 4634
		primary = NULL;
	}

4635
	if (primary && primary_reselect) {
4636 4637 4638 4639
		bond_opt_initstr(&newval, primary_reselect);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
					&newval);
		if (!valptr) {
J
Joe Perches 已提交
4640
			pr_err("Error: Invalid primary_reselect \"%s\"\n",
4641
			       primary_reselect);
4642 4643
			return -EINVAL;
		}
4644
		primary_reselect_value = valptr->value;
4645 4646 4647 4648
	} else {
		primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
	}

4649
	if (fail_over_mac) {
4650 4651 4652 4653
		bond_opt_initstr(&newval, fail_over_mac);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
					&newval);
		if (!valptr) {
J
Joe Perches 已提交
4654
			pr_err("Error: invalid fail_over_mac \"%s\"\n",
4655
			       fail_over_mac);
4656 4657
			return -EINVAL;
		}
4658
		fail_over_mac_value = valptr->value;
4659
		if (bond_mode != BOND_MODE_ACTIVEBACKUP)
4660
			pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
4661 4662 4663
	} else {
		fail_over_mac_value = BOND_FOM_NONE;
	}
4664

4665 4666 4667 4668 4669 4670 4671 4672 4673 4674
	bond_opt_initstr(&newval, "default");
	valptr = bond_opt_parse(
			bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
				     &newval);
	if (!valptr) {
		pr_err("Error: No ad_actor_sys_prio default value");
		return -EINVAL;
	}
	ad_actor_sys_prio = valptr->value;

4675 4676 4677 4678 4679 4680 4681 4682
	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
				&newval);
	if (!valptr) {
		pr_err("Error: No ad_user_port_key default value");
		return -EINVAL;
	}
	ad_user_port_key = valptr->value;

4683 4684 4685 4686 4687
	bond_opt_initstr(&newval, "default");
	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval);
	if (!valptr) {
		pr_err("Error: No tlb_dynamic_lb default value");
		return -EINVAL;
4688
	}
4689
	tlb_dynamic_lb = valptr->value;
4690

4691
	if (lp_interval == 0) {
4692 4693
		pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
			INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
4694 4695 4696
		lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
	}

L
Linus Torvalds 已提交
4697 4698
	/* fill params struct with the proper values */
	params->mode = bond_mode;
4699
	params->xmit_policy = xmit_hashtype;
L
Linus Torvalds 已提交
4700
	params->miimon = miimon;
4701
	params->num_peer_notif = num_peer_notif;
L
Linus Torvalds 已提交
4702
	params->arp_interval = arp_interval;
4703
	params->arp_validate = arp_validate_value;
4704
	params->arp_all_targets = arp_all_targets_value;
L
Linus Torvalds 已提交
4705 4706 4707 4708 4709
	params->updelay = updelay;
	params->downdelay = downdelay;
	params->use_carrier = use_carrier;
	params->lacp_fast = lacp_fast;
	params->primary[0] = 0;
4710
	params->primary_reselect = primary_reselect_value;
4711
	params->fail_over_mac = fail_over_mac_value;
4712
	params->tx_queues = tx_queues;
4713
	params->all_slaves_active = all_slaves_active;
4714
	params->resend_igmp = resend_igmp;
4715
	params->min_links = min_links;
4716
	params->lp_interval = lp_interval;
4717
	params->packets_per_slave = packets_per_slave;
4718
	params->tlb_dynamic_lb = tlb_dynamic_lb;
4719
	params->ad_actor_sys_prio = ad_actor_sys_prio;
4720
	eth_zero_addr(params->ad_actor_system);
4721
	params->ad_user_port_key = ad_user_port_key;
4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732
	if (packets_per_slave > 0) {
		params->reciprocal_packets_per_slave =
			reciprocal_value(packets_per_slave);
	} else {
		/* reciprocal_packets_per_slave is unused if
		 * packets_per_slave is 0 or 1, just initialize it
		 */
		params->reciprocal_packets_per_slave =
			(struct reciprocal_value) { 0 };
	}

L
Linus Torvalds 已提交
4733 4734 4735 4736 4737 4738 4739 4740 4741 4742
	if (primary) {
		strncpy(params->primary, primary, IFNAMSIZ);
		params->primary[IFNAMSIZ - 1] = 0;
	}

	memcpy(params->arp_targets, arp_target, sizeof(arp_target));

	return 0;
}
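
/* Usage sketch (illustrative values only): these checks run when the module
 * is loaded with parameters such as
 *
 *	modprobe bonding mode=802.3ad miimon=100 lacp_rate=fast max_bonds=1
 *
 * Invalid combinations are either rejected with -EINVAL or reset to a sane
 * default with one of the warnings above.
 */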

/* Called from registration process */
4744 4745 4746
static int bond_init(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
4747
	struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
4748

4749
	netdev_dbg(bond_dev, "Begin bond_init\n");
4750

4751
	bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
4752 4753 4754
	if (!bond->wq)
		return -ENOMEM;

4755
	bond->nest_level = SINGLE_DEPTH_NESTING;
4756
	netdev_lockdep_set_classes(bond_dev);
4757

4758
	list_add_tail(&bond->bond_list, &bn->dev_list);
4759

4760
	bond_prepare_sysfs_group(bond);
4761

4762 4763
	bond_debug_register(bond);

4764 4765
	/* Ensure valid dev_addr */
	if (is_zero_ether_addr(bond_dev->dev_addr) &&
4766
	    bond_dev->addr_assign_type == NET_ADDR_PERM)
4767 4768
		eth_hw_addr_random(bond_dev);

4769 4770 4771
	return 0;
}

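/* Reports the "tx_queues" module parameter; the netlink creation path uses
 * this to size the transmit queues of a newly allocated bond device.
 */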
unsigned int bond_get_num_tx_queues(void)
{
	return tx_queues;
}

/* Create a new bond based on the specified name and bonding parameters.
 * If name is NULL, obtain a suitable "bond%d" name for us.
 * Caller must NOT hold rtnl_lock; we need to release it here before we
 * set up our sysfs entries.
 */
int bond_create(struct net *net, const char *name)
{
	struct net_device *bond_dev;
	struct bonding *bond;
	struct alb_bond_info *bond_info;
	int res;

	rtnl_lock();

	bond_dev = alloc_netdev_mq(sizeof(struct bonding),
				   name ? name : "bond%d", NET_NAME_UNKNOWN,
				   bond_setup, tx_queues);
	if (!bond_dev) {
		pr_err("%s: eek! can't alloc netdev!\n", name);
		rtnl_unlock();
		return -ENOMEM;
	}

	/*
	 * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX.
	 * It is set to 0 by default which is wrong.
	 */
	bond = netdev_priv(bond_dev);
	bond_info = &(BOND_ALB_INFO(bond));
	bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;

	dev_net_set(bond_dev, net);
	bond_dev->rtnl_link_ops = &bond_link_ops;

	res = register_netdevice(bond_dev);

	netif_carrier_off(bond_dev);

	bond_work_init_all(bond);

	rtnl_unlock();
	if (res < 0)
		free_netdev(bond_dev);
	return res;
}
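
/* Illustrative example (not part of this file): besides the devices created
 * at module load time, user space can reach bond_create() through the
 * per-namespace sysfs control file, e.g.
 *
 *   echo +bond1 > /sys/class/net/bonding_masters
 */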

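/* Per-network-namespace setup: each namespace gets its own bond device list,
 * its own /proc/net/bonding directory and its own "bonding_masters" sysfs
 * control file.
 */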
static int __net_init bond_net_init(struct net *net)
{
	struct bond_net *bn = net_generic(net, bond_net_id);

	bn->net = net;
	INIT_LIST_HEAD(&bn->dev_list);

	bond_create_proc_dir(bn);
	bond_create_sysfs(bn);

	return 0;
}

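/* Namespace teardown: remove the sysfs control file first so no new bonds
 * can be created, unregister any remaining bonds in one batch, then drop
 * the per-namespace proc directory.
 */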
static void __net_exit bond_net_exit(struct net *net)
{
	struct bond_net *bn = net_generic(net, bond_net_id);
	struct bonding *bond, *tmp_bond;
	LIST_HEAD(list);

	bond_destroy_sysfs(bn);

	/* Kill off any bonds created after unregistering bond rtnl ops */
	rtnl_lock();
	list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
		unregister_netdevice_queue(bond->dev, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();

	bond_destroy_proc_dir(bn);
}

static struct pernet_operations bond_net_ops = {
	.init = bond_net_init,
	.exit = bond_net_exit,
	.id   = &bond_net_id,
	.size = sizeof(struct bond_net),
};

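/* Module init: validate module parameters, register the per-namespace ops,
 * netlink ops and debugfs, then create max_bonds initial devices (bond0,
 * bond1, ...) in the initial namespace and hook the netdev notifier.
 */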
static int __init bonding_init(void)
{
	int i;
	int res;

	pr_info("%s", bond_version);

	res = bond_check_params(&bonding_defaults);
	if (res)
		goto out;

	res = register_pernet_subsys(&bond_net_ops);
	if (res)
		goto out;

	res = bond_netlink_init();
	if (res)
		goto err_link;

	bond_create_debugfs();

	for (i = 0; i < max_bonds; i++) {
		res = bond_create(&init_net, NULL);
		if (res)
			goto err;
	}

	register_netdevice_notifier(&bond_netdev_notifier);
out:
	return res;
err:
	bond_destroy_debugfs();
	bond_netlink_fini();
err_link:
	unregister_pernet_subsys(&bond_net_ops);
	goto out;
}

static void __exit bonding_exit(void)
{
	unregister_netdevice_notifier(&bond_netdev_notifier);

	bond_destroy_debugfs();

	bond_netlink_fini();
	unregister_pernet_subsys(&bond_net_ops);

#ifdef CONFIG_NET_POLL_CONTROLLER
	/* Make sure we don't have an imbalance on our netpoll blocking */
	WARN_ON(atomic_read(&netpoll_block_tx));
#endif
}

module_init(bonding_init);
module_exit(bonding_exit);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");