/*
 * originally based on the dummy device.
 *
 * Copyright 1999, Thomas Davis, tadavis@lbl.gov.
 * Licensed under the GPL. Based on dummy.c, and eql.c devices.
 *
 * bonding.c: an Ethernet Bonding driver
 *
 * This is useful to talk to Cisco EtherChannel-compatible equipment:
 *	Cisco 5500
 *	Sun Trunking (Solaris)
 *	Alteon AceDirector Trunks
 *	Linux Bonding
 *	and probably many L2 switches ...
 *
 * How it works:
 *    ifconfig bond0 ipaddress netmask up
 *      will set up a network device with an ip address.  No mac address
 *	will be assigned at this time.  The hw mac address will come from
 *	the first slave bonded to the channel.  All slaves will then use
 *	this hw mac address.
 *
 *    ifconfig bond0 down
 *         will release all slaves, marking them as down.
 *
 *    ifenslave bond0 eth0
 *	will attach eth0 to bond0 as a slave.  eth0's hw mac address will either
 *	a: be used as bond0's initial mac address, or
 *	b: if bond0 already has a hw mac address, be overwritten with
 *	   bond0's hw mac address.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <net/ip.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/socket.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/smp.h>
#include <linux/if_ether.h>
#include <net/arp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_bonding.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>
#include <net/route.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_dissector.h>
#include <net/switchdev.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/bond_alb.h>

#include "bonding_priv.h"

/*---------------------------- Module parameters ----------------------------*/

/* monitor all links that often (in milliseconds). <=0 disables monitoring */

static int max_bonds	= BOND_DEFAULT_MAX_BONDS;
static int tx_queues	= BOND_DEFAULT_TX_QUEUES;
static int num_peer_notif = 1;
static int miimon;
static int updelay;
static int downdelay;
static int use_carrier	= 1;
static char *mode;
static char *primary;
static char *primary_reselect;
static char *lacp_rate;
static int min_links;
static char *ad_select;
static char *xmit_hash_policy;
static int arp_interval;
static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
static char *arp_validate;
static char *arp_all_targets;
static char *fail_over_mac;
static int all_slaves_active;
static struct bond_params bonding_defaults;
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
static int packets_per_slave = 1;
static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;

module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
module_param(tx_queues, int, 0);
MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
module_param_named(num_grat_arp, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
			       "failover event (alias of num_unsol_na)");
module_param_named(num_unsol_na, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
			       "failover event (alias of num_grat_arp)");
module_param(miimon, int, 0);
MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
module_param(updelay, int, 0);
MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
module_param(downdelay, int, 0);
MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
			    "in milliseconds");
module_param(use_carrier, int, 0);
MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
			      "0 for off, 1 for on (default)");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
		       "1 for active-backup, 2 for balance-xor, "
		       "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
		       "6 for balance-alb");
module_param(primary, charp, 0);
MODULE_PARM_DESC(primary, "Primary network device to use");
module_param(primary_reselect, charp, 0);
MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
				   "once it comes up; "
				   "0 for always (default), "
				   "1 for only if speed of primary is "
				   "better, "
				   "2 for only on active slave "
				   "failure");
module_param(lacp_rate, charp, 0);
MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
			    "0 for slow, 1 for fast");
module_param(ad_select, charp, 0);
MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
			    "0 for stable (default), 1 for bandwidth, "
			    "2 for count");
module_param(min_links, int, 0);
MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");

module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
				   "0 for layer 2 (default), 1 for layer 3+4, "
				   "2 for layer 2+3, 3 for encap layer 2+3, "
				   "4 for encap layer 3+4");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
module_param(arp_validate, charp, 0);
MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
			       "0 for none (default), 1 for active, "
			       "2 for backup, 3 for all");
module_param(arp_all_targets, charp, 0);
MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
module_param(fail_over_mac, charp, 0);
MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
				"the same MAC; 0 for none (default), "
				"1 for active, 2 for follow");
module_param(all_slaves_active, int, 0);
MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
				     "by setting active flag for all slaves; "
				     "0 for never (default), 1 for always.");
module_param(resend_igmp, int, 0);
MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
			      "link failure");
module_param(packets_per_slave, int, 0);
MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
				    "mode; 0 for a random slave, 1 packet per "
				    "slave (default), >1 packets per slave.");
module_param(lp_interval, uint, 0);
MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
			      "the bonding driver sends learning packets to "
			      "each slave's peer switch. The default is 1.");

/*----------------------------- Global variables ----------------------------*/

#ifdef CONFIG_NET_POLL_CONTROLLER
atomic_t netpoll_block_tx = ATOMIC_INIT(0);
#endif

unsigned int bond_net_id __read_mostly;

/*-------------------------- Forward declarations ---------------------------*/

static int bond_init(struct net_device *bond_dev);
static void bond_uninit(struct net_device *bond_dev);
static void bond_get_stats(struct net_device *bond_dev,
			   struct rtnl_link_stats64 *stats);
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
				  int mod);

/*---------------------------- General routines -----------------------------*/

const char *bond_mode_name(int mode)
{
	static const char *names[] = {
		[BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
		[BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
		[BOND_MODE_XOR] = "load balancing (xor)",
		[BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
		[BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
		[BOND_MODE_TLB] = "transmit load balancing",
		[BOND_MODE_ALB] = "adaptive load balancing",
	};

	if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
		return "unknown";

	return names[mode];
}

/*---------------------------------- VLAN -----------------------------------*/

/**
 * bond_dev_queue_xmit - Prepare skb for xmit.
 *
 * @bond: bond device that got this skb for tx.
 * @skb: hw accel VLAN tagged skb to transmit
 * @slave_dev: slave that is supposed to xmit this skbuff
 */
void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
			struct net_device *slave_dev)
{
	skb->dev = slave_dev;

	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	if (unlikely(netpoll_tx_running(bond->dev)))
		bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
	else
		dev_queue_xmit(skb);
}

/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
 * we don't protect the slave list iteration with a lock because:
 * a. This operation is performed in IOCTL context,
 * b. The operation is protected by the RTNL semaphore in the 8021q code,
 * c. Holding a lock with BH disabled while directly calling a base driver
 *    entry point is generally a BAD idea.
 *
 * The design of synchronization/protection for this operation in the 8021q
 * module is good for one or more VLAN devices over a single physical device
 * and cannot be extended for a teaming solution like bonding, so there is a
 * potential race condition here where a net device from the vlan group might
 * be referenced (either by a base driver or the 8021q code) while it is being
 * removed from the system. However, it turns out we're not making matters
 * worse, and if it works for regular VLAN usage it will work here too.
 */

/**
 * bond_vlan_rx_add_vid - Propagates adding an id to slaves
 * @bond_dev: bonding net device that got called
 * @vid: vlan id being added
 */
static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
				__be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *rollback_slave;
	struct list_head *iter;
	int res;

	bond_for_each_slave(bond, slave, iter) {
		res = vlan_vid_add(slave->dev, proto, vid);
		if (res)
			goto unwind;
	}

	return 0;

unwind:
	/* unwind to the slave that failed */
	bond_for_each_slave(bond, rollback_slave, iter) {
		if (rollback_slave == slave)
			break;

		vlan_vid_del(rollback_slave->dev, proto, vid);
	}

	return res;
}

/**
 * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
 * @bond_dev: bonding net device that got called
 * @vid: vlan id being removed
 */
static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
				 __be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		vlan_vid_del(slave->dev, proto, vid);

	if (bond_is_lb(bond))
		bond_alb_clear_vlan(bond, vid);

	return 0;
}

/*------------------------------- Link status -------------------------------*/

/* Set the carrier state for the master according to the state of its
 * slaves.  If any slaves are up, the master is up.  In 802.3ad mode,
 * do special 802.3ad magic.
 *
 * Returns zero if carrier state does not change, nonzero if it does.
 */
int bond_set_carrier(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave;

	if (!bond_has_slaves(bond))
		goto down;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		return bond_3ad_set_carrier(bond);

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP) {
			if (!netif_carrier_ok(bond->dev)) {
				netif_carrier_on(bond->dev);
				return 1;
			}
			return 0;
		}
	}

down:
	if (netif_carrier_ok(bond->dev)) {
		netif_carrier_off(bond->dev);
		return 1;
	}
	return 0;
}

/* Get link speed and duplex from the slave's base driver
 * using ethtool. If for some reason the call fails or the
 * values are invalid, set speed and duplex to -1,
 * and return. Return 1 if speed or duplex settings are
 * UNKNOWN; 0 otherwise.
 */
static int bond_update_speed_duplex(struct slave *slave)
{
	struct net_device *slave_dev = slave->dev;
	struct ethtool_link_ksettings ecmd;
	int res;

	slave->speed = SPEED_UNKNOWN;
	slave->duplex = DUPLEX_UNKNOWN;

	res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
	if (res < 0)
		return 1;
	if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
		return 1;
	switch (ecmd.base.duplex) {
	case DUPLEX_FULL:
	case DUPLEX_HALF:
		break;
	default:
		return 1;
	}

	slave->speed = ecmd.base.speed;
	slave->duplex = ecmd.base.duplex;

	return 0;
}

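/* Return a printable name for a slave's link state */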
const char *bond_slave_link_status(s8 link)
{
	switch (link) {
	case BOND_LINK_UP:
		return "up";
	case BOND_LINK_FAIL:
		return "going down";
	case BOND_LINK_DOWN:
		return "down";
	case BOND_LINK_BACK:
		return "going back";
	default:
		return "unknown";
	}
}

/* if <dev> supports MII link status reporting, check its link status.
 *
 * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
 * depending upon the setting of the use_carrier parameter.
 *
 * Return either BMSR_LSTATUS, meaning that the link is up (or we
 * can't tell and just pretend it is), or 0, meaning that the link is
 * down.
 *
 * If reporting is non-zero, instead of faking link up, return -1 if
 * both ETHTOOL and MII ioctls fail (meaning the device does not
 * support them).  If use_carrier is set, return whatever it says.
 * It'd be nice if there was a good way to tell if a driver supports
 * netif_carrier, but there really isn't.
 */
static int bond_check_dev_link(struct bonding *bond,
			       struct net_device *slave_dev, int reporting)
{
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	int (*ioctl)(struct net_device *, struct ifreq *, int);
	struct ifreq ifr;
	struct mii_ioctl_data *mii;

	if (!reporting && !netif_running(slave_dev))
		return 0;

	if (bond->params.use_carrier)
		return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;

	/* Try to get link status using Ethtool first. */
	if (slave_dev->ethtool_ops->get_link)
		return slave_dev->ethtool_ops->get_link(slave_dev) ?
			BMSR_LSTATUS : 0;

	/* Ethtool can't be used, fall back to MII ioctls. */
	ioctl = slave_ops->ndo_do_ioctl;
	if (ioctl) {
		/* TODO: set pointer to correct ioctl on a per team member
		 *       basis to make this more efficient. that is, once
		 *       we determine the correct ioctl, we will always
		 *       call it and not the others for that team
		 *       member.
		 */

		/* We cannot assume that SIOCGMIIPHY will also read a
		 * register; not all network drivers (e.g., e100)
		 * support that.
		 */

		/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
		strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
		mii = if_mii(&ifr);
		if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
			mii->reg_num = MII_BMSR;
			if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
				return mii->val_out & BMSR_LSTATUS;
		}
	}

	/* If reporting, report that either there's no dev->do_ioctl,
	 * or both SIOCGMIIREG and get_link failed (meaning that we
	 * cannot report link status).  If not reporting, pretend
	 * we're ok.
	 */
	return reporting ? -1 : BMSR_LSTATUS;
}

/*----------------------------- Multicast list ------------------------------*/

484
/* Push the promiscuity flag down to appropriate slaves */
485
static int bond_set_promiscuity(struct bonding *bond, int inc)
L
Linus Torvalds 已提交
486
{
487
	struct list_head *iter;
488
	int err = 0;
489

490
	if (bond_uses_primary(bond)) {
491
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
492 493 494

		if (curr_active)
			err = dev_set_promiscuity(curr_active->dev, inc);
L
Linus Torvalds 已提交
495 496
	} else {
		struct slave *slave;
497

498
		bond_for_each_slave(bond, slave, iter) {
499 500 501
			err = dev_set_promiscuity(slave->dev, inc);
			if (err)
				return err;
L
Linus Torvalds 已提交
502 503
		}
	}
504
	return err;
L
Linus Torvalds 已提交
505 506
}

507
/* Push the allmulti flag down to all slaves */
508
static int bond_set_allmulti(struct bonding *bond, int inc)
L
Linus Torvalds 已提交
509
{
510
	struct list_head *iter;
511
	int err = 0;
512

513
	if (bond_uses_primary(bond)) {
514
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
515 516 517

		if (curr_active)
			err = dev_set_allmulti(curr_active->dev, inc);
L
Linus Torvalds 已提交
518 519
	} else {
		struct slave *slave;
520

521
		bond_for_each_slave(bond, slave, iter) {
522 523 524
			err = dev_set_allmulti(slave->dev, inc);
			if (err)
				return err;
L
Linus Torvalds 已提交
525 526
		}
	}
527
	return err;
L
Linus Torvalds 已提交
528 529
}

530
/* Retrieve the list of registered multicast addresses for the bonding
531 532 533
 * device and retransmit an IGMP JOIN request to the current active
 * slave.
 */
534
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
535
{
536 537 538
	struct bonding *bond = container_of(work, struct bonding,
					    mcast_work.work);

539
	if (!rtnl_trylock()) {
540
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
541
		return;
542
	}
543
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
544

545 546
	if (bond->igmp_retrans > 1) {
		bond->igmp_retrans--;
547
		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
548
	}
549
	rtnl_unlock();
550 551
}

552
/* Flush bond's hardware addresses from slave */
553
static void bond_hw_addr_flush(struct net_device *bond_dev,
S
Stephen Hemminger 已提交
554
			       struct net_device *slave_dev)
L
Linus Torvalds 已提交
555
{
556
	struct bonding *bond = netdev_priv(bond_dev);
L
Linus Torvalds 已提交
557

558 559
	dev_uc_unsync(slave_dev, bond_dev);
	dev_mc_unsync(slave_dev, bond_dev);
L
Linus Torvalds 已提交
560

561
	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
L
Linus Torvalds 已提交
562 563 564
		/* del lacpdu mc addr from mc list */
		u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;

565
		dev_mc_del(slave_dev, lacpdu_multicast);
L
Linus Torvalds 已提交
566 567 568 569 570
	}
}

/*--------------------------- Active slave change ---------------------------*/

571
/* Update the hardware address list and promisc/allmulti for the new and
 * old active slaves (if any).  Modes that are not using primary keep all
 * slaves up to date at all times; only the modes that use primary need to call
 * this function to swap these settings during a failover.
L
Linus Torvalds 已提交
575
 */
576 577
static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
			      struct slave *old_active)
L
Linus Torvalds 已提交
578 579
{
	if (old_active) {
S
Stephen Hemminger 已提交
580
		if (bond->dev->flags & IFF_PROMISC)
L
Linus Torvalds 已提交
581 582
			dev_set_promiscuity(old_active->dev, -1);

S
Stephen Hemminger 已提交
583
		if (bond->dev->flags & IFF_ALLMULTI)
L
Linus Torvalds 已提交
584 585
			dev_set_allmulti(old_active->dev, -1);

586
		bond_hw_addr_flush(bond->dev, old_active->dev);
L
Linus Torvalds 已提交
587 588 589
	}

	if (new_active) {
590
		/* FIXME: Signal errors upstream. */
S
Stephen Hemminger 已提交
591
		if (bond->dev->flags & IFF_PROMISC)
L
Linus Torvalds 已提交
592 593
			dev_set_promiscuity(new_active->dev, 1);

S
Stephen Hemminger 已提交
594
		if (bond->dev->flags & IFF_ALLMULTI)
L
Linus Torvalds 已提交
595 596
			dev_set_allmulti(new_active->dev, 1);

597
		netif_addr_lock_bh(bond->dev);
598 599
		dev_uc_sync(new_active->dev, bond->dev);
		dev_mc_sync(new_active->dev, bond->dev);
600
		netif_addr_unlock_bh(bond->dev);
L
Linus Torvalds 已提交
601 602 603
	}
}

604 605 606 607 608 609 610 611 612 613
/**
 * bond_set_dev_addr - clone slave's address to bond
 * @bond_dev: bond net device
 * @slave_dev: slave net device
 *
 * Should be called with RTNL held.
 */
static void bond_set_dev_addr(struct net_device *bond_dev,
			      struct net_device *slave_dev)
{
614 615
	netdev_dbg(bond_dev, "bond_dev=%p slave_dev=%p slave_dev->name=%s slave_dev->addr_len=%d\n",
		   bond_dev, slave_dev, slave_dev->name, slave_dev->addr_len);
616 617 618 619 620
	memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
	bond_dev->addr_assign_type = NET_ADDR_STOLEN;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
}

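/* Find a slave, other than @new_active, that still carries the bond's MAC
 * address; used by fail_over_mac=follow to pick the address to swap.
 */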
static struct slave *bond_get_old_active(struct bonding *bond,
					 struct slave *new_active)
{
	struct slave *slave;
	struct list_head *iter;

	bond_for_each_slave(bond, slave, iter) {
		if (slave == new_active)
			continue;

		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
			return slave;
	}

	return NULL;
}

638
/* bond_do_fail_over_mac
639 640 641
 *
 * Perform special MAC address swapping for fail_over_mac settings
 *
642
 * Called with RTNL
643 644 645 646 647
 */
static void bond_do_fail_over_mac(struct bonding *bond,
				  struct slave *new_active,
				  struct slave *old_active)
{
648 649
	u8 tmp_mac[MAX_ADDR_LEN];
	struct sockaddr_storage ss;
650 651 652 653
	int rv;

	switch (bond->params.fail_over_mac) {
	case BOND_FOM_ACTIVE:
654
		if (new_active)
655
			bond_set_dev_addr(bond->dev, new_active->dev);
656 657
		break;
	case BOND_FOM_FOLLOW:
658
		/* if new_active && old_active, swap them
659 660 661 662 663 664
		 * if just old_active, do nothing (going to no active slave)
		 * if just new_active, set new_active to bond's MAC
		 */
		if (!new_active)
			return;

665 666 667
		if (!old_active)
			old_active = bond_get_old_active(bond, new_active);

668
		if (old_active) {
669 670 671 672 673 674
			bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr,
					  new_active->dev->addr_len);
			bond_hw_addr_copy(ss.__data,
					  old_active->dev->dev_addr,
					  old_active->dev->addr_len);
			ss.ss_family = new_active->dev->type;
675
		} else {
676 677 678
			bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
					  bond->dev->addr_len);
			ss.ss_family = bond->dev->type;
679 680
		}

681 682
		rv = dev_set_mac_address(new_active->dev,
					 (struct sockaddr *)&ss);
683
		if (rv) {
684 685
			netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
				   -rv, new_active->dev->name);
686 687 688 689 690 691
			goto out;
		}

		if (!old_active)
			goto out;

692 693 694
		bond_hw_addr_copy(ss.__data, tmp_mac,
				  new_active->dev->addr_len);
		ss.ss_family = old_active->dev->type;
695

696 697
		rv = dev_set_mac_address(old_active->dev,
					 (struct sockaddr *)&ss);
698
		if (rv)
699 700
			netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
				   -rv, new_active->dev->name);
701 702 703
out:
		break;
	default:
704 705
		netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
			   bond->params.fail_over_mac);
706 707 708 709 710
		break;
	}

}

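/* Choose between the configured primary slave and the current active slave,
 * taking link state and the primary_reselect policy into account.
 */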
static struct slave *bond_choose_primary_or_current(struct bonding *bond)
712
{
713
	struct slave *prim = rtnl_dereference(bond->primary_slave);
714
	struct slave *curr = rtnl_dereference(bond->curr_active_slave);
715

716 717 718 719 720 721
	if (!prim || prim->link != BOND_LINK_UP) {
		if (!curr || curr->link != BOND_LINK_UP)
			return NULL;
		return curr;
	}

722 723
	if (bond->force_primary) {
		bond->force_primary = false;
724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745
		return prim;
	}

	if (!curr || curr->link != BOND_LINK_UP)
		return prim;

	/* At this point, prim and curr are both up */
	switch (bond->params.primary_reselect) {
	case BOND_PRI_RESELECT_ALWAYS:
		return prim;
	case BOND_PRI_RESELECT_BETTER:
		if (prim->speed < curr->speed)
			return curr;
		if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
			return curr;
		return prim;
	case BOND_PRI_RESELECT_FAILURE:
		return curr;
	default:
		netdev_err(bond->dev, "impossible primary_reselect %d\n",
			   bond->params.primary_reselect);
		return curr;
746 747
	}
}
748

L
Linus Torvalds 已提交
749
/**
750
 * bond_find_best_slave - select the best available slave to be the active one
L
Linus Torvalds 已提交
751 752 753 754
 * @bond: our bonding struct
 */
static struct slave *bond_find_best_slave(struct bonding *bond)
{
755
	struct slave *slave, *bestslave = NULL;
756
	struct list_head *iter;
L
Linus Torvalds 已提交
757 758
	int mintime = bond->params.updelay;

759 760 761
	slave = bond_choose_primary_or_current(bond);
	if (slave)
		return slave;
L
Linus Torvalds 已提交
762

763 764 765
	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP)
			return slave;
766
		if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
767 768 769
		    slave->delay < mintime) {
			mintime = slave->delay;
			bestslave = slave;
L
Linus Torvalds 已提交
770 771 772 773 774 775
		}
	}

	return bestslave;
}

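/* Decide whether peer notifications (gratuitous ARP/NA) should be sent now:
 * requires an active slave, pending notifications and a settled carrier.
 */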
static bool bond_should_notify_peers(struct bonding *bond)
{
778 779 780 781 782
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	rcu_read_unlock();
783

784 785
	netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
		   slave ? slave->dev->name : "NULL");
786 787

	if (!slave || !bond->send_peer_notif ||
788
	    !netif_carrier_ok(bond->dev) ||
789 790 791 792 793 794
	    test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
		return false;

	return true;
}

L
Linus Torvalds 已提交
795 796 797 798 799 800 801 802 803 804 805 806 807
/**
 * change_active_interface - change the active slave into the specified one
 * @bond: our bonding struct
 * @new: the new slave to make the active one
 *
 * Set the new slave to the bond's settings and unset them on the old
 * curr_active_slave.
 * Setting include flags, mc-list, promiscuity, allmulti, etc.
 *
 * If @new's link state is %BOND_LINK_BACK we'll set it to %BOND_LINK_UP,
 * because it is apparently the best available slave we have, even though its
 * updelay hasn't timed out yet.
 *
808
 * Caller must hold RTNL.
L
Linus Torvalds 已提交
809
 */
810
void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
L
Linus Torvalds 已提交
811
{
812 813
	struct slave *old_active;

814 815 816
	ASSERT_RTNL();

	old_active = rtnl_dereference(bond->curr_active_slave);
L
Linus Torvalds 已提交
817

S
Stephen Hemminger 已提交
818
	if (old_active == new_active)
L
Linus Torvalds 已提交
819 820 821
		return;

	if (new_active) {
822
		new_active->last_link_up = jiffies;
823

L
Linus Torvalds 已提交
824
		if (new_active->link == BOND_LINK_BACK) {
825
			if (bond_uses_primary(bond)) {
826 827 828
				netdev_info(bond->dev, "making interface %s the new active one %d ms earlier\n",
					    new_active->dev->name,
					    (bond->params.updelay - new_active->delay) * bond->params.miimon);
L
Linus Torvalds 已提交
829 830 831
			}

			new_active->delay = 0;
832 833
			bond_set_slave_link_state(new_active, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);
L
Linus Torvalds 已提交
834

835
			if (BOND_MODE(bond) == BOND_MODE_8023AD)
L
Linus Torvalds 已提交
836 837
				bond_3ad_handle_link_change(new_active, BOND_LINK_UP);

838
			if (bond_is_lb(bond))
L
Linus Torvalds 已提交
839 840
				bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
		} else {
841
			if (bond_uses_primary(bond)) {
842 843
				netdev_info(bond->dev, "making interface %s the new active one\n",
					    new_active->dev->name);
L
Linus Torvalds 已提交
844 845 846 847
			}
		}
	}

848
	if (bond_uses_primary(bond))
849
		bond_hw_addr_swap(bond, new_active, old_active);
L
Linus Torvalds 已提交
850

851
	if (bond_is_lb(bond)) {
L
Linus Torvalds 已提交
852
		bond_alb_handle_active_change(bond, new_active);
853
		if (old_active)
854 855
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);
856
		if (new_active)
857 858
			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);
L
Linus Torvalds 已提交
859
	} else {
860
		rcu_assign_pointer(bond->curr_active_slave, new_active);
L
Linus Torvalds 已提交
861
	}
J
Jay Vosburgh 已提交
862

863
	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
S
Stephen Hemminger 已提交
864
		if (old_active)
865 866
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);
J
Jay Vosburgh 已提交
867 868

		if (new_active) {
869 870
			bool should_notify_peers = false;

871 872
			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);
873

874 875 876
			if (bond->params.fail_over_mac)
				bond_do_fail_over_mac(bond, new_active,
						      old_active);
877

878 879 880 881 882 883 884
			if (netif_running(bond->dev)) {
				bond->send_peer_notif =
					bond->params.num_peer_notif;
				should_notify_peers =
					bond_should_notify_peers(bond);
			}

885
			call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
886
			if (should_notify_peers)
887 888
				call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
							 bond->dev);
889
		}
J
Jay Vosburgh 已提交
890
	}
891

892
	/* resend IGMP joins since active slave has changed or
893 894
	 * all were sent on curr_active_slave.
	 * resend only if bond is brought up with the affected
895 896
	 * bonding modes and the retransmission is enabled
	 */
897
	if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
898
	    ((bond_uses_primary(bond) && new_active) ||
899
	     BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
900
		bond->igmp_retrans = bond->params.resend_igmp;
901
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
902
	}
L
Linus Torvalds 已提交
903 904 905 906 907 908
}

/**
 * bond_select_active_slave - select a new active slave, if needed
 * @bond: our bonding struct
 *
S
909
 * This functions should be called when one of the following occurs:
L
910 911 912 913
 * - The old curr_active_slave has been released or lost its link.
 * - The primary_slave has got its link back.
 * - A slave has got its link back and there's no old curr_active_slave.
 *
914
 * Caller must hold RTNL.
L
915
 */
916
void bond_select_active_slave(struct bonding *bond)
L
Linus Torvalds 已提交
917 918
{
	struct slave *best_slave;
919
	int rv;
L
Linus Torvalds 已提交
920

921 922
	ASSERT_RTNL();

L
Linus Torvalds 已提交
923
	best_slave = bond_find_best_slave(bond);
924
	if (best_slave != rtnl_dereference(bond->curr_active_slave)) {
L
Linus Torvalds 已提交
925
		bond_change_active_slave(bond, best_slave);
926 927 928 929
		rv = bond_set_carrier(bond);
		if (!rv)
			return;

Z
Zhang Shengju 已提交
930
		if (netif_carrier_ok(bond->dev))
931
			netdev_info(bond->dev, "first active interface up!\n");
Z
Zhang Shengju 已提交
932
		else
933
			netdev_info(bond->dev, "now running without any active interface!\n");
L
Linus Torvalds 已提交
934 935 936
	}
}

937
#ifdef CONFIG_NET_POLL_CONTROLLER
938
static inline int slave_enable_netpoll(struct slave *slave)
939
{
940 941
	struct netpoll *np;
	int err = 0;
942

943
	np = kzalloc(sizeof(*np), GFP_KERNEL);
944 945 946 947
	err = -ENOMEM;
	if (!np)
		goto out;

948
	err = __netpoll_setup(np, slave->dev);
949 950 951
	if (err) {
		kfree(np);
		goto out;
952
	}
953 954 955 956 957 958 959 960 961 962 963 964
	slave->np = np;
out:
	return err;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
	struct netpoll *np = slave->np;

	if (!np)
		return;

	slave->np = NULL;
965
	__netpoll_free_async(np);
966
}
967 968 969

static void bond_poll_controller(struct net_device *bond_dev)
{
970 971 972 973 974 975 976 977 978 979
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave = NULL;
	struct list_head *iter;
	struct ad_info ad_info;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		if (bond_3ad_get_active_agg_info(bond, &ad_info))
			return;

	bond_for_each_slave_rcu(bond, slave, iter) {
980
		if (!bond_slave_is_up(slave))
981 982 983 984 985 986 987 988 989 990 991
			continue;

		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			struct aggregator *agg =
			    SLAVE_AD_INFO(slave)->port.aggregator;

			if (agg &&
			    agg->aggregator_identifier != ad_info.aggregator_id)
				continue;
		}

992
		netpoll_poll_dev(slave->dev);
993
	}
994 995
}

996
static void bond_netpoll_cleanup(struct net_device *bond_dev)
997
{
998
	struct bonding *bond = netdev_priv(bond_dev);
999
	struct list_head *iter;
1000 1001
	struct slave *slave;

1002
	bond_for_each_slave(bond, slave, iter)
1003
		if (bond_slave_is_up(slave))
1004
			slave_disable_netpoll(slave);
1005
}

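/* Enable netpoll on all slaves; on failure, roll back via bond_netpoll_cleanup() */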
static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
1008 1009
{
	struct bonding *bond = netdev_priv(dev);
1010
	struct list_head *iter;
1011
	struct slave *slave;
1012
	int err = 0;
1013

1014
	bond_for_each_slave(bond, slave, iter) {
1015 1016
		err = slave_enable_netpoll(slave);
		if (err) {
1017
			bond_netpoll_cleanup(dev);
1018
			break;
1019 1020
		}
	}
1021
	return err;
1022
}
1023 1024 1025 1026 1027 1028 1029 1030
#else
static inline int slave_enable_netpoll(struct slave *slave)
{
	return 0;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
}
1031 1032 1033 1034 1035
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
}
#endif

L
Linus Torvalds 已提交
1036 1037
/*---------------------------------- IOCTL ----------------------------------*/

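/* Restrict the bond's advertised features to what all slaves can support */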
static netdev_features_t bond_fix_features(struct net_device *dev,
1039
					   netdev_features_t features)
1040
{
1041
	struct bonding *bond = netdev_priv(dev);
1042
	struct list_head *iter;
1043
	netdev_features_t mask;
1044
	struct slave *slave;
1045

1046
	mask = features;
1047

1048
	features &= ~NETIF_F_ONE_FOR_ALL;
1049
	features |= NETIF_F_ALL_FOR_ALL;
1050

1051
	bond_for_each_slave(bond, slave, iter) {
1052 1053
		features = netdev_increment_features(features,
						     slave->dev->features,
1054 1055
						     mask);
	}
1056
	features = netdev_add_tso_features(features, mask);
1057 1058 1059 1060

	return features;
}

1061
#define BOND_VLAN_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
1062 1063
				 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
				 NETIF_F_HIGHDMA | NETIF_F_LRO)
1064

1065 1066
#define BOND_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)

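/* Recompute vlan/encap features, header length and GSO limits for the bond
 * from the common capabilities of all slaves.
 */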
static void bond_compute_features(struct bonding *bond)
{
1070 1071
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;
1072
	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
1073
	netdev_features_t enc_features  = BOND_ENC_FEATURES;
1074 1075 1076
	struct net_device *bond_dev = bond->dev;
	struct list_head *iter;
	struct slave *slave;
1077
	unsigned short max_hard_header_len = ETH_HLEN;
1078 1079
	unsigned int gso_max_size = GSO_MAX_SIZE;
	u16 gso_max_segs = GSO_MAX_SEGS;
1080

1081
	if (!bond_has_slaves(bond))
1082
		goto done;
1083
	vlan_features &= NETIF_F_ALL_FOR_ALL;
1084

1085
	bond_for_each_slave(bond, slave, iter) {
1086
		vlan_features = netdev_increment_features(vlan_features,
1087 1088
			slave->dev->vlan_features, BOND_VLAN_FEATURES);

1089 1090 1091
		enc_features = netdev_increment_features(enc_features,
							 slave->dev->hw_enc_features,
							 BOND_ENC_FEATURES);
1092
		dst_release_flag &= slave->dev->priv_flags;
1093 1094
		if (slave->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = slave->dev->hard_header_len;
1095 1096 1097

		gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
		gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
1098
	}
1099
	bond_dev->hard_header_len = max_hard_header_len;
1100

1101
done:
1102
	bond_dev->vlan_features = vlan_features;
1103 1104
	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
				    NETIF_F_GSO_UDP_L4;
1105 1106
	bond_dev->gso_max_segs = gso_max_segs;
	netif_set_gso_max_size(bond_dev, gso_max_size);
1107

1108 1109 1110 1111
	bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
	    dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;
1112

1113
	netdev_change_features(bond_dev);
1114 1115
}

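/* Inherit link-layer type, header ops and address layout from the slave */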
static void bond_setup_by_slave(struct net_device *bond_dev,
				struct net_device *slave_dev)
{
1119
	bond_dev->header_ops	    = slave_dev->header_ops;
1120 1121 1122 1123 1124 1125 1126 1127 1128

	bond_dev->type		    = slave_dev->type;
	bond_dev->hard_header_len   = slave_dev->hard_header_len;
	bond_dev->addr_len	    = slave_dev->addr_len;

	memcpy(bond_dev->broadcast, slave_dev->broadcast,
		slave_dev->addr_len);
}

1129
/* On bonding slaves other than the currently active slave, suppress
1130
 * duplicates except for alb non-mcast/bcast.
1131 1132
 */
static bool bond_should_deliver_exact_match(struct sk_buff *skb,
1133 1134
					    struct slave *slave,
					    struct bonding *bond)
1135
{
1136
	if (bond_is_slave_inactive(slave)) {
1137
		if (BOND_MODE(bond) == BOND_MODE_ALB &&
1138 1139 1140 1141 1142 1143 1144 1145
		    skb->pkt_type != PACKET_BROADCAST &&
		    skb->pkt_type != PACKET_MULTICAST)
			return false;
		return true;
	}
	return false;
}

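/* rx_handler attached to each slave: gives mode-specific recv_probe callbacks
 * first look at a frame and then re-targets it at the bond device.
 */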
static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1147
{
1148
	struct sk_buff *skb = *pskb;
1149
	struct slave *slave;
1150
	struct bonding *bond;
1151 1152
	int (*recv_probe)(const struct sk_buff *, struct bonding *,
			  struct slave *);
1153
	int ret = RX_HANDLER_ANOTHER;
1154

1155 1156 1157 1158 1159
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;

	*pskb = skb;
1160

J
Jiri Pirko 已提交
1161 1162
	slave = bond_slave_get_rcu(skb->dev);
	bond = slave->bond;
1163

1164
	recv_probe = READ_ONCE(bond->recv_probe);
1165
	if (recv_probe) {
1166 1167 1168 1169
		ret = recv_probe(skb, bond, slave);
		if (ret == RX_HANDLER_CONSUMED) {
			consume_skb(skb);
			return ret;
1170 1171 1172
		}
	}

1173 1174 1175
	/* don't change skb->dev for link-local packets */
	if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
		return RX_HANDLER_PASS;
Z
Zhang Shengju 已提交
1176
	if (bond_should_deliver_exact_match(skb, slave, bond))
1177
		return RX_HANDLER_EXACT;
1178

J
Jiri Pirko 已提交
1179
	skb->dev = bond->dev;
1180

1181
	if (BOND_MODE(bond) == BOND_MODE_ALB &&
J
Jiri Pirko 已提交
1182
	    bond->dev->priv_flags & IFF_BRIDGE_PORT &&
1183 1184
	    skb->pkt_type == PACKET_HOST) {

1185 1186 1187
		if (unlikely(skb_cow_head(skb,
					  skb->data - skb_mac_header(skb)))) {
			kfree_skb(skb);
1188
			return RX_HANDLER_CONSUMED;
1189
		}
1190 1191
		bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
				  bond->dev->addr_len);
1192 1193
	}

1194
	return ret;
1195 1196
}

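/* Map the bonding mode to the LAG tx type advertised to other layers */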
static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
1198
{
1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213
	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
		return NETDEV_LAG_TX_TYPE_ROUNDROBIN;
	case BOND_MODE_ACTIVEBACKUP:
		return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP;
	case BOND_MODE_BROADCAST:
		return NETDEV_LAG_TX_TYPE_BROADCAST;
	case BOND_MODE_XOR:
	case BOND_MODE_8023AD:
		return NETDEV_LAG_TX_TYPE_HASH;
	default:
		return NETDEV_LAG_TX_TYPE_UNKNOWN;
	}
}

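/* Map xmit_hash_policy to the LAG hash type advertised to other layers */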
static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
					       enum netdev_lag_tx_type type)
{
	if (type != NETDEV_LAG_TX_TYPE_HASH)
		return NETDEV_LAG_HASH_NONE;

	switch (bond->params.xmit_policy) {
	case BOND_XMIT_POLICY_LAYER2:
		return NETDEV_LAG_HASH_L2;
	case BOND_XMIT_POLICY_LAYER34:
		return NETDEV_LAG_HASH_L34;
	case BOND_XMIT_POLICY_LAYER23:
		return NETDEV_LAG_HASH_L23;
	case BOND_XMIT_POLICY_ENCAP23:
		return NETDEV_LAG_HASH_E23;
	case BOND_XMIT_POLICY_ENCAP34:
		return NETDEV_LAG_HASH_E34;
	default:
		return NETDEV_LAG_HASH_UNKNOWN;
	}
}

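/* Register the bond as the slave's master upper device, passing LAG tx/hash info */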
static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
				      struct netlink_ext_ack *extack)
1238 1239
{
	struct netdev_lag_upper_info lag_upper_info;
1240
	enum netdev_lag_tx_type type;
1241

1242 1243 1244
	type = bond_lag_tx_type(bond);
	lag_upper_info.tx_type = type;
	lag_upper_info.hash_type = bond_lag_hash_type(bond, type);
1245 1246 1247

	return netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
					    &lag_upper_info, extack);
1248 1249
}

1250
static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
1251
{
1252 1253
	netdev_upper_dev_unlink(slave->dev, bond->dev);
	slave->dev->flags &= ~IFF_SLAVE;
1254 1255
}

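/* Allocate a slave structure, including 802.3ad state when in 802.3ad mode */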
static struct slave *bond_alloc_slave(struct bonding *bond)
{
	struct slave *slave = NULL;

Z
Zhang Shengju 已提交
1260
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
1261 1262 1263
	if (!slave)
		return NULL;

1264
	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278
		SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
					       GFP_KERNEL);
		if (!SLAVE_AD_INFO(slave)) {
			kfree(slave);
			return NULL;
		}
	}
	return slave;
}

static void bond_free_slave(struct slave *slave)
{
	struct bonding *bond = bond_get_bond_by_slave(slave);

1279
	if (BOND_MODE(bond) == BOND_MODE_8023AD)
1280 1281 1282 1283 1284
		kfree(SLAVE_AD_INFO(slave));

	kfree(slave);
}

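/* Fill the ifbond/ifslave info structures from current bond/slave state */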
static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
{
	info->bond_mode = BOND_MODE(bond);
	info->miimon = bond->params.miimon;
	info->num_slaves = bond->slave_cnt;
}

static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
{
	strcpy(info->slave_name, slave->dev->name);
	info->link = slave->link;
	info->state = bond_slave_state(slave);
	info->link_failure_count = slave->link_failure_count;
}

1300 1301
static void bond_netdev_notify(struct net_device *dev,
			       struct netdev_bonding_info *info)
1302 1303
{
	rtnl_lock();
1304
	netdev_bonding_info_change(dev, info);
1305 1306 1307 1308 1309 1310 1311 1312
	rtnl_unlock();
}

static void bond_netdev_notify_work(struct work_struct *_work)
{
	struct netdev_notify_work *w =
		container_of(_work, struct netdev_notify_work, work.work);

1313
	bond_netdev_notify(w->dev, &w->bonding_info);
1314
	dev_put(w->dev);
1315
	kfree(w);
}

void bond_queue_slave_event(struct slave *slave)
{
1320
	struct bonding *bond = slave->bond;
1321 1322 1323 1324 1325
	struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);

	if (!nnw)
		return;

1326
	dev_hold(slave->dev);
1327
	nnw->dev = slave->dev;
1328 1329 1330
	bond_fill_ifslave(slave, &nnw->bonding_info.slave);
	bond_fill_ifbond(bond, &nnw->bonding_info.master);
	INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
1331

1332
	queue_delayed_work(slave->bond->wq, &nnw->work, 0);
1333 1334
}

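/* Publish the slave's link and tx-enabled state to lower-state listeners */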
void bond_lower_state_changed(struct slave *slave)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = slave->link == BOND_LINK_UP ||
		       slave->link == BOND_LINK_FAIL;
	info.tx_enabled = bond_is_active_slave(slave);
	netdev_lower_state_changed(slave->dev, &info);
}

L
Linus Torvalds 已提交
1345
/* enslave device <slave> to bond device <master> */
D
David Ahern 已提交
1346 1347
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
		 struct netlink_ext_ack *extack)
L
Linus Torvalds 已提交
1348
{
1349
	struct bonding *bond = netdev_priv(bond_dev);
1350
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
1351
	struct slave *new_slave = NULL, *prev_slave;
1352
	struct sockaddr_storage ss;
L
Linus Torvalds 已提交
1353
	int link_reporting;
1354
	int res = 0, i;
L
Linus Torvalds 已提交
1355

1356 1357 1358
	if (!bond->params.use_carrier &&
	    slave_dev->ethtool_ops->get_link == NULL &&
	    slave_ops->ndo_do_ioctl == NULL) {
1359 1360
		netdev_warn(bond_dev, "no link monitoring support for %s\n",
			    slave_dev->name);
L
Linus Torvalds 已提交
1361 1362
	}

M
Mahesh Bandewar 已提交
1363 1364
	/* already in-use? */
	if (netdev_is_rx_handler_busy(slave_dev)) {
1365
		NL_SET_ERR_MSG(extack, "Device is in use and cannot be enslaved");
M
Mahesh Bandewar 已提交
1366 1367
		netdev_err(bond_dev,
			   "Error: Device is in use and cannot be enslaved\n");
L
Linus Torvalds 已提交
1368 1369 1370
		return -EBUSY;
	}

1371
	if (bond_dev == slave_dev) {
1372
		NL_SET_ERR_MSG(extack, "Cannot enslave bond to itself.");
1373
		netdev_err(bond_dev, "cannot enslave bond to itself.\n");
1374 1375 1376
		return -EPERM;
	}

L
Linus Torvalds 已提交
1377 1378 1379
	/* vlan challenged mutual exclusion */
	/* no need to lock since we're protected by rtnl_lock */
	if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
1380 1381
		netdev_dbg(bond_dev, "%s is NETIF_F_VLAN_CHALLENGED\n",
			   slave_dev->name);
1382
		if (vlan_uses_dev(bond_dev)) {
1383
			NL_SET_ERR_MSG(extack, "Can not enslave VLAN challenged device to VLAN enabled bond");
1384 1385
			netdev_err(bond_dev, "Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
				   slave_dev->name, bond_dev->name);
L
Linus Torvalds 已提交
1386 1387
			return -EPERM;
		} else {
1388 1389 1390
			netdev_warn(bond_dev, "enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
				    slave_dev->name, slave_dev->name,
				    bond_dev->name);
L
Linus Torvalds 已提交
1391 1392
		}
	} else {
1393 1394
		netdev_dbg(bond_dev, "%s is !NETIF_F_VLAN_CHALLENGED\n",
			   slave_dev->name);
L
Linus Torvalds 已提交
1395 1396
	}

1397
	/* Old ifenslave binaries are no longer supported.  These can
S
Stephen Hemminger 已提交
1398
	 * be identified with moderate accuracy by the state of the slave:
1399 1400 1401
	 * the current ifenslave will set the interface down prior to
	 * enslaving it; the old ifenslave will not.
	 */
Y
yzhu1 已提交
1402
	if (slave_dev->flags & IFF_UP) {
1403
		NL_SET_ERR_MSG(extack, "Device can not be enslaved while up");
1404 1405
		netdev_err(bond_dev, "%s is up - this may be due to an out of date ifenslave\n",
			   slave_dev->name);
1406
		return -EPERM;
1407
	}
L
Linus Torvalds 已提交
1408

1409 1410 1411 1412 1413 1414 1415
	/* set bonding device ether type by slave - bonding netdevices are
	 * created with ether_setup, so when the slave type is not ARPHRD_ETHER
	 * there is a need to override some of the type dependent attribs/funcs.
	 *
	 * bond ether type mutual exclusion - don't allow slaves of dissimilar
	 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
	 */
1416
	if (!bond_has_slaves(bond)) {
1417
		if (bond_dev->type != slave_dev->type) {
1418 1419
			netdev_dbg(bond_dev, "change device type from %d to %d\n",
				   bond_dev->type, slave_dev->type);
1420

1421 1422
			res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
						       bond_dev);
1423 1424
			res = notifier_to_errno(res);
			if (res) {
1425
				netdev_err(bond_dev, "refused to change device type\n");
1426
				return -EBUSY;
1427
			}
1428

1429
			/* Flush unicast and multicast addresses */
1430
			dev_uc_flush(bond_dev);
1431
			dev_mc_flush(bond_dev);
1432

1433 1434
			if (slave_dev->type != ARPHRD_ETHER)
				bond_setup_by_slave(bond_dev, slave_dev);
1435
			else {
1436
				ether_setup(bond_dev);
1437 1438
				bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
			}
1439

1440 1441
			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
						 bond_dev);
1442
		}
1443
	} else if (bond_dev->type != slave_dev->type) {
1444
		NL_SET_ERR_MSG(extack, "Device type is different from other slaves");
1445 1446
		netdev_err(bond_dev, "%s ether type (%d) is different from other slaves (%d), can not enslave it\n",
			   slave_dev->name, slave_dev->type, bond_dev->type);
1447
		return -EINVAL;
1448 1449
	}

1450 1451
	if (slave_dev->type == ARPHRD_INFINIBAND &&
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1452
		NL_SET_ERR_MSG(extack, "Only active-backup mode is supported for infiniband slaves");
1453 1454 1455 1456 1457 1458 1459 1460
		netdev_warn(bond_dev, "Type (%d) supports only active-backup mode\n",
			    slave_dev->type);
		res = -EOPNOTSUPP;
		goto err_undo_flags;
	}

	if (!slave_ops->ndo_set_mac_address ||
	    slave_dev->type == ARPHRD_INFINIBAND) {
1461
		netdev_warn(bond_dev, "The slave device specified does not support setting the MAC address\n");
1462 1463 1464
		if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
		    bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
			if (!bond_has_slaves(bond)) {
1465
				bond->params.fail_over_mac = BOND_FOM_ACTIVE;
1466
				netdev_warn(bond_dev, "Setting fail_over_mac to active for active-backup mode\n");
1467
			} else {
1468
				NL_SET_ERR_MSG(extack, "Slave device does not support setting the MAC address, but fail_over_mac is not set to active");
1469
				netdev_err(bond_dev, "The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n");
1470 1471
				res = -EOPNOTSUPP;
				goto err_undo_flags;
1472
			}
1473
		}
L
Linus Torvalds 已提交
1474 1475
	}

1476 1477
	call_netdevice_notifiers(NETDEV_JOIN, slave_dev);

1478
	/* If this is the first slave, then we need to set the master's hardware
1479 1480
	 * address to be the same as the slave's.
	 */
1481
	if (!bond_has_slaves(bond) &&
1482
	    bond->dev->addr_assign_type == NET_ADDR_RANDOM)
1483
		bond_set_dev_addr(bond->dev, slave_dev);
1484

1485
	new_slave = bond_alloc_slave(bond);
L
Linus Torvalds 已提交
1486 1487 1488 1489
	if (!new_slave) {
		res = -ENOMEM;
		goto err_undo_flags;
	}
1490

1491 1492
	new_slave->bond = bond;
	new_slave->dev = slave_dev;
1493
	/* Set the new_slave's queue_id to be zero.  Queue ID mapping
1494 1495 1496 1497
	 * is set via sysfs or module option if desired.
	 */
	new_slave->queue_id = 0;

1498 1499 1500 1501
	/* Save slave's original mtu and then set it to match the bond */
	new_slave->original_mtu = slave_dev->mtu;
	res = dev_set_mtu(slave_dev, bond->dev->mtu);
	if (res) {
1502
		netdev_dbg(bond_dev, "Error %d calling dev_set_mtu\n", res);
1503 1504 1505
		goto err_free;
	}

1506
	/* Save slave's original ("permanent") mac address for modes
1507 1508 1509
	 * that need it, and for restoring it upon release, and then
	 * set it to the master's address
	 */
1510 1511
	bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr,
			  slave_dev->addr_len);
L
Linus Torvalds 已提交
1512

1513
	if (!bond->params.fail_over_mac ||
1514
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1515
		/* Set slave to master's mac address.  The application already
1516 1517
		 * set the master's mac address to that of the first slave
		 */
1518 1519 1520
		memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
		ss.ss_family = slave_dev->type;
		res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
1521
		if (res) {
1522
			netdev_dbg(bond_dev, "Error %d calling set_mac_address\n", res);
1523
			goto err_restore_mtu;
1524
		}
1525
	}
L
Linus Torvalds 已提交
1526

1527 1528 1529
	/* set slave flag before open to prevent IPv6 addrconf */
	slave_dev->flags |= IFF_SLAVE;

1530 1531 1532
	/* open the slave since the application closed it */
	res = dev_open(slave_dev);
	if (res) {
1533
		netdev_dbg(bond_dev, "Opening slave %s failed\n", slave_dev->name);
1534
		goto err_restore_mac;
L
Linus Torvalds 已提交
1535 1536
	}

1537
	slave_dev->priv_flags |= IFF_BONDING;
1538 1539
	/* initialize slave stats */
	dev_get_stats(new_slave->dev, &new_slave->slave_stats);
L
Linus Torvalds 已提交
1540

1541
	if (bond_is_lb(bond)) {
L
Linus Torvalds 已提交
1542 1543 1544 1545
		/* bond_alb_init_slave() must be called before all other stages since
		 * it might fail and we do not want to have to undo everything
		 */
		res = bond_alb_init_slave(bond, new_slave);
S
Stephen Hemminger 已提交
1546
		if (res)
1547
			goto err_close;
L
Linus Torvalds 已提交
1548 1549
	}

1550 1551
	res = vlan_vids_add_by_dev(slave_dev, bond_dev);
	if (res) {
1552 1553
		netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
			   slave_dev->name);
1554
		goto err_close;
1555
	}
L
Linus Torvalds 已提交
1556

1557
	prev_slave = bond_last_slave(bond);
L
Linus Torvalds 已提交
1558 1559 1560 1561

	new_slave->delay = 0;
	new_slave->link_failure_count = 0;

1562 1563
	if (bond_update_speed_duplex(new_slave) &&
	    bond_needs_speed_duplex(bond))
1564
		new_slave->link = BOND_LINK_DOWN;
1565

1566
	new_slave->last_rx = jiffies -
1567
		(msecs_to_jiffies(bond->params.arp_interval) + 1);
1568
	for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
1569
		new_slave->target_last_arp_rx[i] = new_slave->last_rx;
1570

L
Linus Torvalds 已提交
1571 1572 1573 1574
	if (bond->params.miimon && !bond->params.use_carrier) {
		link_reporting = bond_check_dev_link(bond, slave_dev, 1);

		if ((link_reporting == -1) && !bond->params.arp_interval) {
			/* miimon is set but a bonded network driver
			 * does not support ETHTOOL/MII and
			 * arp_interval is not set.  Note: if
			 * use_carrier is enabled, we will never go
			 * here (because netif_carrier is always
			 * supported); thus, we don't need to change
			 * the messages for netif_carrier.
			 */
			netdev_warn(bond_dev, "MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n",
				    slave_dev->name);
		} else if (link_reporting == -1) {
			/* unable to get link status using mii/ethtool */
			netdev_warn(bond_dev, "can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n",
				    slave_dev->name);
		}
	}

	/* check for initial state */
	new_slave->link = BOND_LINK_NOCHANGE;
	if (bond->params.miimon) {
		if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
			if (bond->params.updelay) {
				bond_set_slave_link_state(new_slave,
							  BOND_LINK_BACK,
							  BOND_SLAVE_NOTIFY_NOW);
				new_slave->delay = bond->params.updelay;
			} else {
				bond_set_slave_link_state(new_slave,
							  BOND_LINK_UP,
							  BOND_SLAVE_NOTIFY_NOW);
			}
		} else {
			bond_set_slave_link_state(new_slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_NOW);
		}
	} else if (bond->params.arp_interval) {
		bond_set_slave_link_state(new_slave,
					  (netif_carrier_ok(slave_dev) ?
					  BOND_LINK_UP : BOND_LINK_DOWN),
					  BOND_SLAVE_NOTIFY_NOW);
	} else {
		bond_set_slave_link_state(new_slave, BOND_LINK_UP,
					  BOND_SLAVE_NOTIFY_NOW);
	}

	if (new_slave->link != BOND_LINK_DOWN)
		new_slave->last_link_up = jiffies;
	netdev_dbg(bond_dev, "Initial state of slave_dev is BOND_LINK_%s\n",
		   new_slave->link == BOND_LINK_DOWN ? "DOWN" :
		   (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));

	if (bond_uses_primary(bond) && bond->params.primary[0]) {
		/* if there is a primary slave, remember it */
		if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
			rcu_assign_pointer(bond->primary_slave, new_slave);
			bond->force_primary = true;
		}
	}

	switch (BOND_MODE(bond)) {
	case BOND_MODE_ACTIVEBACKUP:
		bond_set_slave_inactive_flags(new_slave,
					      BOND_SLAVE_NOTIFY_NOW);
		break;
	case BOND_MODE_8023AD:
		/* in 802.3ad mode, the internal mechanism
		 * will activate the slaves in the selected
		 * aggregator
		 */
		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
		/* if this is the first slave */
		if (!prev_slave) {
			SLAVE_AD_INFO(new_slave)->id = 1;
			/* Initialize AD with the number of times that the AD timer is called in 1 second
			 * (this can be done only after the mac address of the bond is set)
			 */
			bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
		} else {
			SLAVE_AD_INFO(new_slave)->id =
				SLAVE_AD_INFO(prev_slave)->id + 1;
		}

		bond_3ad_bind_slave(new_slave);
		break;
	case BOND_MODE_TLB:
	case BOND_MODE_ALB:
		bond_set_active_slave(new_slave);
		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
		break;
	default:
		netdev_dbg(bond_dev, "This slave is always active in trunk mode\n");

		/* always active in trunk mode */
		bond_set_active_slave(new_slave);

		/* In trunking mode there is little meaning to curr_active_slave
		 * anyway (it holds no special properties of the bond device),
		 * so we can change it without calling change_active_interface()
		 */
		if (!rcu_access_pointer(bond->curr_active_slave) &&
		    new_slave->link == BOND_LINK_UP)
			rcu_assign_pointer(bond->curr_active_slave, new_slave);

		break;
	} /* switch(bond_mode) */

#ifdef CONFIG_NET_POLL_CONTROLLER
	if (bond->dev->npinfo) {
		if (slave_enable_netpoll(new_slave)) {
			netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
			res = -EBUSY;
			goto err_detach;
		}
	}
#endif

	if (!(bond_dev->features & NETIF_F_LRO))
		dev_disable_lro(slave_dev);

	res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
					 new_slave);
	if (res) {
		netdev_dbg(bond_dev, "Error %d calling netdev_rx_handler_register\n", res);
		goto err_detach;
	}

	res = bond_master_upper_dev_link(bond, new_slave, extack);
	if (res) {
		netdev_dbg(bond_dev, "Error %d calling bond_master_upper_dev_link\n", res);
		goto err_unregister;
	}

	res = bond_sysfs_slave_add(new_slave);
	if (res) {
		netdev_dbg(bond_dev, "Error %d calling bond_sysfs_slave_add\n", res);
		goto err_upper_unlink;
	}

	bond->nest_level = dev_get_nest_level(bond_dev) + 1;

	/* If the mode uses primary, then the following is handled by
	 * bond_change_active_slave().
	 */
	if (!bond_uses_primary(bond)) {
		/* set promiscuity level to new slave */
		if (bond_dev->flags & IFF_PROMISC) {
			res = dev_set_promiscuity(slave_dev, 1);
			if (res)
				goto err_sysfs_del;
		}

		/* set allmulti level to new slave */
		if (bond_dev->flags & IFF_ALLMULTI) {
			res = dev_set_allmulti(slave_dev, 1);
			if (res) {
				if (bond_dev->flags & IFF_PROMISC)
					dev_set_promiscuity(slave_dev, -1);
				goto err_sysfs_del;
			}
		}

		netif_addr_lock_bh(bond_dev);
		dev_mc_sync_multiple(slave_dev, bond_dev);
		dev_uc_sync_multiple(slave_dev, bond_dev);
		netif_addr_unlock_bh(bond_dev);

		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			/* add lacpdu mc addr to mc list */
			u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;

			dev_mc_add(slave_dev, lacpdu_multicast);
		}
	}

	bond->slave_cnt++;
	bond_compute_features(bond);
	bond_set_carrier(bond);

	if (bond_uses_primary(bond)) {
		block_netpoll_tx();
		bond_select_active_slave(bond);
		unblock_netpoll_tx();
	}

	if (bond_mode_can_use_xmit_hash(bond))
		bond_update_slave_arr(bond, NULL);

	netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
		    slave_dev->name,
		    bond_is_active_slave(new_slave) ? "an active" : "a backup",
		    new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");

	/* enslave is successful */
	bond_queue_slave_event(new_slave);
	return 0;

/* Undo stages on error */
err_sysfs_del:
	bond_sysfs_slave_del(new_slave);

err_upper_unlink:
	bond_upper_dev_unlink(bond, new_slave);

err_unregister:
	netdev_rx_handler_unregister(slave_dev);

err_detach:
	vlan_vids_del_by_dev(slave_dev, bond_dev);
	if (rcu_access_pointer(bond->primary_slave) == new_slave)
		RCU_INIT_POINTER(bond->primary_slave, NULL);
	if (rcu_access_pointer(bond->curr_active_slave) == new_slave) {
		block_netpoll_tx();
		bond_change_active_slave(bond, NULL);
		bond_select_active_slave(bond);
		unblock_netpoll_tx();
	}
	/* either primary_slave or curr_active_slave might've changed */
	synchronize_rcu();
	slave_disable_netpoll(new_slave);

err_close:
	slave_dev->priv_flags &= ~IFF_BONDING;
	dev_close(slave_dev);

err_restore_mac:
	slave_dev->flags &= ~IFF_SLAVE;
	if (!bond->params.fail_over_mac ||
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		/* XXX TODO - fom follow mode needs to change master's
		 * MAC if this slave's MAC is in use by the bond, or at
		 * least print a warning.
		 */
		bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
				  new_slave->dev->addr_len);
		ss.ss_family = slave_dev->type;
		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
	}

err_restore_mtu:
	dev_set_mtu(slave_dev, new_slave->original_mtu);

err_free:
	bond_free_slave(new_slave);

err_undo_flags:
	/* Enslave of first slave has failed and we need to fix master's mac */
	if (!bond_has_slaves(bond)) {
		if (ether_addr_equal_64bits(bond_dev->dev_addr,
					    slave_dev->dev_addr))
			eth_hw_addr_random(bond_dev);
		if (bond_dev->type != ARPHRD_ETHER) {
			dev_close(bond_dev);
			ether_setup(bond_dev);
			bond_dev->flags |= IFF_MASTER;
			bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		}
	}

	return res;
}

/* Try to release the slave device <slave> from the bond device <master>
 * It is legal to access curr_active_slave without a lock because the entire
 * function is RTNL-locked. If "all" is true it means that the function is being
 * called while destroying a bond interface and all slaves are being released.
 *
 * The rules for slave state should be:
 *   for Active/Backup:
 *     Active stays up, all backups go down
 *   for Bonded connections:
 *     The first up interface should be left on and all others downed.
 */
static int __bond_release_one(struct net_device *bond_dev,
			      struct net_device *slave_dev,
			      bool all, bool unregister)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *oldcurrent;
	struct sockaddr_storage ss;
	int old_flags = bond_dev->flags;
	netdev_features_t old_features = bond_dev->features;

	/* slave is not a slave or master is not master of this slave */
	if (!(slave_dev->flags & IFF_SLAVE) ||
	    !netdev_has_upper_dev(slave_dev, bond_dev)) {
		netdev_dbg(bond_dev, "cannot release %s\n",
			   slave_dev->name);
		return -EINVAL;
	}

	block_netpoll_tx();

	slave = bond_get_slave_by_dev(bond, slave_dev);
	if (!slave) {
		/* not a slave of this bond */
		netdev_info(bond_dev, "%s not enslaved\n",
			    slave_dev->name);
		unblock_netpoll_tx();
		return -EINVAL;
	}

	bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);

	bond_sysfs_slave_del(slave);

	/* recompute stats just before removing the slave */
	bond_get_stats(bond->dev, &bond->bond_stats);

	bond_upper_dev_unlink(bond, slave);
	/* unregister rx_handler early so bond_handle_frame wouldn't be called
	 * for this slave anymore.
	 */
	netdev_rx_handler_unregister(slave_dev);

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		bond_3ad_unbind_slave(slave);

	if (bond_mode_can_use_xmit_hash(bond))
		bond_update_slave_arr(bond, slave);

	netdev_info(bond_dev, "Releasing %s interface %s\n",
		    bond_is_active_slave(slave) ? "active" : "backup",
		    slave_dev->name);

	oldcurrent = rcu_access_pointer(bond->curr_active_slave);

	RCU_INIT_POINTER(bond->current_arp_slave, NULL);

	if (!all && (!bond->params.fail_over_mac ||
		     BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
		if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
		    bond_has_slaves(bond))
			netdev_warn(bond_dev, "the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
				    slave_dev->name, slave->perm_hwaddr,
				    bond_dev->name, slave_dev->name);
	}

	if (rtnl_dereference(bond->primary_slave) == slave)
		RCU_INIT_POINTER(bond->primary_slave, NULL);

	if (oldcurrent == slave)
		bond_change_active_slave(bond, NULL);

	if (bond_is_lb(bond)) {
		/* Must be called only after the slave has been
		 * detached from the list and the curr_active_slave
		 * has been cleared (if our_slave == old_current),
		 * but before a new active slave is selected.
		 */
		bond_alb_deinit_slave(bond, slave);
	}

	if (all) {
		RCU_INIT_POINTER(bond->curr_active_slave, NULL);
	} else if (oldcurrent == slave) {
		/* Note that we hold RTNL over this sequence, so there
		 * is no concern that another slave add/remove event
		 * will interfere.
		 */
		bond_select_active_slave(bond);
	}

	if (!bond_has_slaves(bond)) {
		bond_set_carrier(bond);
		eth_hw_addr_random(bond_dev);
	}

	unblock_netpoll_tx();
	synchronize_rcu();
	bond->slave_cnt--;

	if (!bond_has_slaves(bond)) {
		call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
		call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
	}

	bond_compute_features(bond);
	if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
	    (old_features & NETIF_F_VLAN_CHALLENGED))
		netdev_info(bond_dev, "last VLAN challenged slave %s left bond %s - VLAN blocking is removed\n",
			    slave_dev->name, bond_dev->name);

	vlan_vids_del_by_dev(slave_dev, bond_dev);

	/* If the mode uses primary, then this case was handled above by
	 * bond_change_active_slave(..., NULL)
	 */
	if (!bond_uses_primary(bond)) {
		/* unset promiscuity level from slave
		 * NOTE: The NETDEV_CHANGEADDR call above may change the value
		 * of the IFF_PROMISC flag in the bond_dev, but we need the
		 * value of that flag before that change, as that was the value
		 * when this slave was attached, so we cache at the start of the
		 * function and use it here. Same goes for ALLMULTI below
		 */
		if (old_flags & IFF_PROMISC)
			dev_set_promiscuity(slave_dev, -1);

		/* unset allmulti level from slave */
		if (old_flags & IFF_ALLMULTI)
			dev_set_allmulti(slave_dev, -1);

		bond_hw_addr_flush(bond_dev, slave_dev);
	}

	slave_disable_netpoll(slave);

	/* close slave before restoring its mac address */
	dev_close(slave_dev);

	if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		/* restore original ("permanent") mac address */
		bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
				  slave->dev->addr_len);
		ss.ss_family = slave_dev->type;
		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
	}

	if (unregister)
		__dev_set_mtu(slave_dev, slave->original_mtu);
	else
		dev_set_mtu(slave_dev, slave->original_mtu);

	slave_dev->priv_flags &= ~IFF_BONDING;

	bond_free_slave(slave);
	return 0;
}

/* A wrapper used because of ndo_del_link */
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
{
	return __bond_release_one(bond_dev, slave_dev, false, false);
}

/* First release a slave and then destroy the bond if no more slaves are left.
 * Must be under rtnl_lock when this function is called.
 */
static int  bond_release_and_destroy(struct net_device *bond_dev,
				     struct net_device *slave_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	int ret;

	ret = __bond_release_one(bond_dev, slave_dev, false, true);
	if (ret == 0 && !bond_has_slaves(bond)) {
		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
		netdev_info(bond_dev, "Destroying bond %s\n",
			    bond_dev->name);
		bond_remove_proc_entry(bond);
		unregister_netdevice(bond_dev);
	}
	return ret;
}

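/* Fill in an ifbond structure with the bond's current settings (mode,
 * miimon interval and slave count).
 */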
static void bond_info_query(struct net_device *bond_dev, struct ifbond *info)
{
	struct bonding *bond = netdev_priv(bond_dev);
	bond_fill_ifbond(bond, info);
}

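/* Fill in an ifslave structure for the slave at index info->slave_id,
 * if such a slave exists.
 */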
static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	int i = 0, res = -ENODEV;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter) {
		if (i++ == (int)info->slave_id) {
			res = 0;
			bond_fill_ifslave(slave, info);
			break;
		}
	}

	return res;
}

/*-------------------------------- Monitoring -------------------------------*/

/* called with rcu_read_lock() */
static int bond_miimon_inspect(struct bonding *bond)
{
	int link_state, commit = 0;
	struct list_head *iter;
	struct slave *slave;
	bool ignore_updelay;

	ignore_updelay = !rcu_dereference(bond->curr_active_slave);

	bond_for_each_slave_rcu(bond, slave, iter) {
		slave->new_link = BOND_LINK_NOCHANGE;
		slave->link_new_state = slave->link;

		link_state = bond_check_dev_link(bond, slave->dev, 0);

		switch (slave->link) {
		case BOND_LINK_UP:
			if (link_state)
				continue;

			bond_propose_link_state(slave, BOND_LINK_FAIL);
			commit++;
			slave->delay = bond->params.downdelay;
			if (slave->delay) {
				netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
					    (BOND_MODE(bond) ==
					     BOND_MODE_ACTIVEBACKUP) ?
					     (bond_is_active_slave(slave) ?
					      "active " : "backup ") : "",
					    slave->dev->name,
					    bond->params.downdelay * bond->params.miimon);
			}
			/*FALLTHRU*/
		case BOND_LINK_FAIL:
			if (link_state) {
				/* recovered before downdelay expired */
				bond_propose_link_state(slave, BOND_LINK_UP);
				slave->last_link_up = jiffies;
				netdev_info(bond->dev, "link status up again after %d ms for interface %s\n",
					    (bond->params.downdelay - slave->delay) *
					    bond->params.miimon,
					    slave->dev->name);
				commit++;
				continue;
			}

			if (slave->delay <= 0) {
				slave->new_link = BOND_LINK_DOWN;
				commit++;
				continue;
			}

			slave->delay--;
			break;

		case BOND_LINK_DOWN:
			if (!link_state)
				continue;

			bond_propose_link_state(slave, BOND_LINK_BACK);
			commit++;
			slave->delay = bond->params.updelay;

			if (slave->delay) {
				netdev_info(bond->dev, "link status up for interface %s, enabling it in %d ms\n",
					    slave->dev->name,
					    ignore_updelay ? 0 :
					    bond->params.updelay *
					    bond->params.miimon);
			}
			/*FALLTHRU*/
		case BOND_LINK_BACK:
			if (!link_state) {
				bond_propose_link_state(slave, BOND_LINK_DOWN);
				netdev_info(bond->dev, "link status down again after %d ms for interface %s\n",
					    (bond->params.updelay - slave->delay) *
					    bond->params.miimon,
					    slave->dev->name);
				commit++;
				continue;
			}

			if (ignore_updelay)
				slave->delay = 0;

			if (slave->delay <= 0) {
				slave->new_link = BOND_LINK_UP;
				commit++;
				ignore_updelay = false;
				continue;
			}

			slave->delay--;
			break;
		}
	}

	return commit;
}
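/* Propagate a miimon-detected link state change to the mode-specific
 * handlers (802.3ad, TLB/ALB) and refresh the xmit slave array for xor mode.
 */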
static void bond_miimon_link_change(struct bonding *bond,
				    struct slave *slave,
				    char link)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_8023AD:
		bond_3ad_handle_link_change(slave, link);
		break;
	case BOND_MODE_TLB:
	case BOND_MODE_ALB:
		bond_alb_handle_link_change(bond, slave, link);
		break;
	case BOND_MODE_XOR:
		bond_update_slave_arr(bond, NULL);
		break;
	}
}

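/* Apply the link state changes recorded by bond_miimon_inspect(); called
 * under RTNL so slave flags and the active slave can be changed safely.
 */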
static void bond_miimon_commit(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave, *primary;

	bond_for_each_slave(bond, slave, iter) {
		switch (slave->new_link) {
		case BOND_LINK_NOCHANGE:
			continue;
		case BOND_LINK_UP:
			if (bond_update_speed_duplex(slave) &&
			    bond_needs_speed_duplex(bond)) {
				slave->link = BOND_LINK_DOWN;
				if (net_ratelimit())
					netdev_warn(bond->dev,
						    "failed to get link speed/duplex for %s\n",
						    slave->dev->name);
				continue;
			}
			bond_set_slave_link_state(slave, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);
			slave->last_link_up = jiffies;

			primary = rtnl_dereference(bond->primary_slave);
			if (BOND_MODE(bond) == BOND_MODE_8023AD) {
				/* prevent it from being the active one */
				bond_set_backup_slave(slave);
			} else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
				/* make it immediately active */
				bond_set_active_slave(slave);
			} else if (slave != primary) {
				/* prevent it from being the active one */
				bond_set_backup_slave(slave);
			}

			netdev_info(bond->dev, "link status definitely up for interface %s, %u Mbps %s duplex\n",
				    slave->dev->name,
				    slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
				    slave->duplex ? "full" : "half");

			bond_miimon_link_change(bond, slave, BOND_LINK_UP);

			if (!bond->curr_active_slave || slave == primary)
				goto do_failover;

			continue;

		case BOND_LINK_DOWN:
			if (slave->link_failure_count < UINT_MAX)
				slave->link_failure_count++;

			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_NOW);

			if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
			    BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_set_slave_inactive_flags(slave,
							      BOND_SLAVE_NOTIFY_NOW);

			netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n",
				    slave->dev->name);

			bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);

			if (slave == rcu_access_pointer(bond->curr_active_slave))
				goto do_failover;

			continue;

		default:
			netdev_err(bond->dev, "invalid new link %d on slave %s\n",
				   slave->new_link, slave->dev->name);
			slave->new_link = BOND_LINK_NOCHANGE;

			continue;
		}

do_failover:
		block_netpoll_tx();
		bond_select_active_slave(bond);
		unblock_netpoll_tx();
	}

	bond_set_carrier(bond);
}

/* bond_mii_monitor
 *
 * Really a wrapper that splits the mii monitor into two phases: an
 * inspection, then (if inspection indicates something needs to be done)
 * an acquisition of appropriate locks followed by a commit phase to
 * implement whatever link state changes are indicated.
 */
static void bond_mii_monitor(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mii_work.work);
	bool should_notify_peers = false;
	unsigned long delay;
	struct slave *slave;
	struct list_head *iter;

	delay = msecs_to_jiffies(bond->params.miimon);

	if (!bond_has_slaves(bond))
		goto re_arm;

	rcu_read_lock();

	should_notify_peers = bond_should_notify_peers(bond);

	if (bond_miimon_inspect(bond)) {
		rcu_read_unlock();

		/* Race avoidance with bond_close cancel of workqueue */
		if (!rtnl_trylock()) {
			delay = 1;
			should_notify_peers = false;
			goto re_arm;
		}

		bond_for_each_slave(bond, slave, iter) {
			bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
		}
		bond_miimon_commit(bond);

		rtnl_unlock();	/* might sleep, hold no other locks */
	} else
		rcu_read_unlock();

re_arm:
	if (bond->params.miimon)
		queue_delayed_work(bond->wq, &bond->mii_work, delay);

	if (should_notify_peers) {
		if (!rtnl_trylock())
			return;
		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
		rtnl_unlock();
	}
}

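/* netdev_walk_all_upper_dev_rcu() callback: returns nonzero when the upper
 * device owns the IP address being searched for.
 */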
static int bond_upper_dev_walk(struct net_device *upper, void *data)
{
	__be32 ip = *((__be32 *)data);

	return ip == bond_confirm_addr(upper, 0, ip);
}

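/* Returns true if @ip is owned by the bond device itself or by any of its
 * upper (stacked) devices.
 */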
static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
{
	bool ret = false;

	if (ip == bond_confirm_addr(bond->dev, 0, ip))
		return true;

	rcu_read_lock();
	if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk, &ip))
		ret = true;
	rcu_read_unlock();

	return ret;
}

/* We go to the (large) trouble of VLAN tagging ARP frames because
 * switches in VLAN mode (especially if ports are configured as
 * "native" to a VLAN) might not pass non-tagged frames.
 */
static void bond_arp_send(struct net_device *slave_dev, int arp_op,
			  __be32 dest_ip, __be32 src_ip,
			  struct bond_vlan_tag *tags)
{
	struct sk_buff *skb;
	struct bond_vlan_tag *outer_tag = tags;

	netdev_dbg(slave_dev, "arp %d on slave %s: dst %pI4 src %pI4\n",
		   arp_op, slave_dev->name, &dest_ip, &src_ip);

	skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
			 NULL, slave_dev->dev_addr, NULL);

	if (!skb) {
		net_err_ratelimited("ARP packet allocation failed\n");
		return;
	}

	if (!tags || tags->vlan_proto == VLAN_N_VID)
		goto xmit;

	tags++;

	/* Go through all the tags backwards and add them to the packet */
	while (tags->vlan_proto != VLAN_N_VID) {
		if (!tags->vlan_id) {
			tags++;
			continue;
		}

		netdev_dbg(slave_dev, "inner tag: proto %X vid %X\n",
			   ntohs(outer_tag->vlan_proto), tags->vlan_id);
		skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
						tags->vlan_id);
		if (!skb) {
			net_err_ratelimited("failed to insert inner VLAN tag\n");
			return;
		}

		tags++;
	}
	/* Set the outer tag */
	if (outer_tag->vlan_id) {
		netdev_dbg(slave_dev, "outer tag: proto %X vid %X\n",
			   ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
		__vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
				       outer_tag->vlan_id);
	}

xmit:
	arp_xmit(skb);
}

/* Validate the device path between the @start_dev and the @end_dev.
 * The path is valid if the @end_dev is reachable through device
 * stacking.
 * When the path is validated, collect any vlan information in the
 * path.
 */
struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
					      struct net_device *end_dev,
					      int level)
{
	struct bond_vlan_tag *tags;
	struct net_device *upper;
	struct list_head  *iter;

	if (start_dev == end_dev) {
		tags = kcalloc(level + 1, sizeof(*tags), GFP_ATOMIC);
		if (!tags)
			return ERR_PTR(-ENOMEM);
		tags[level].vlan_proto = VLAN_N_VID;
		return tags;
	}

	netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
		tags = bond_verify_device_path(upper, end_dev, level + 1);
		if (IS_ERR_OR_NULL(tags)) {
			if (IS_ERR(tags))
				return tags;
			continue;
		}
		if (is_vlan_dev(upper)) {
			tags[level].vlan_proto = vlan_dev_vlan_proto(upper);
			tags[level].vlan_id = vlan_dev_vlan_id(upper);
		}

		return tags;
	}

	return NULL;
}

static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
{
	struct rtable *rt;
	struct bond_vlan_tag *tags;
	__be32 *targets = bond->params.arp_targets, addr;
	int i;

	for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
		netdev_dbg(bond->dev, "basa: target %pI4\n", &targets[i]);
		tags = NULL;

		/* Find out through which dev should the packet go */
		rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
				     RTO_ONLINK, 0);
		if (IS_ERR(rt)) {
			/* there's no route to target - try to send arp
			 * probe to generate any traffic (arp_validate=0)
			 */
			if (bond->params.arp_validate)
				net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
						     bond->dev->name,
						     &targets[i]);
			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
				      0, tags);
			continue;
		}

		/* bond device itself */
		if (rt->dst.dev == bond->dev)
			goto found;

		rcu_read_lock();
		tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0);
		rcu_read_unlock();

		if (!IS_ERR_OR_NULL(tags))
			goto found;

		/* Not our device - skip */
		netdev_dbg(bond->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
			   &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");

		ip_rt_put(rt);
		continue;

found:
		addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
		ip_rt_put(rt);
		bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
			      addr, tags);
		kfree(tags);
	}
}

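/* Record a validated ARP: update the slave's last_rx and the per-target
 * timestamp for the matching arp_ip_target.
 */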
static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
{
	int i;

	if (!sip || !bond_has_this_ip(bond, tip)) {
		netdev_dbg(bond->dev, "bva: sip %pI4 tip %pI4 not found\n",
			   &sip, &tip);
		return;
	}

	i = bond_get_targets_ip(bond->params.arp_targets, sip);
	if (i == -1) {
		netdev_dbg(bond->dev, "bva: sip %pI4 not found in targets\n",
			   &sip);
		return;
	}
	slave->last_rx = jiffies;
	slave->target_last_arp_rx[i] = jiffies;
}

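/* rx_handler hook for the ARP monitor: refresh last_rx and, when arp_validate
 * is enabled, validate received ARPs against the configured arp_ip_targets.
 */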
int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
		 struct slave *slave)
{
	struct arphdr *arp = (struct arphdr *)skb->data;
	struct slave *curr_active_slave, *curr_arp_slave;
	unsigned char *arp_ptr;
	__be32 sip, tip;
	int is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
	unsigned int alen;
	if (!slave_do_arp_validate(bond, slave)) {
		if ((slave_do_arp_validate_only(bond) && is_arp) ||
		    !slave_do_arp_validate_only(bond))
			slave->last_rx = jiffies;
		return RX_HANDLER_ANOTHER;
	} else if (!is_arp) {
		return RX_HANDLER_ANOTHER;
	}

	alen = arp_hdr_len(bond->dev);

	netdev_dbg(bond->dev, "bond_arp_rcv: skb->dev %s\n",
		   skb->dev->name);

	if (alen > skb_headlen(skb)) {
		arp = kmalloc(alen, GFP_ATOMIC);
		if (!arp)
			goto out_unlock;
		if (skb_copy_bits(skb, 0, arp, alen) < 0)
			goto out_unlock;
	}

	if (arp->ar_hln != bond->dev->addr_len ||
	    skb->pkt_type == PACKET_OTHERHOST ||
	    skb->pkt_type == PACKET_LOOPBACK ||
	    arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_pln != 4)
		goto out_unlock;

	arp_ptr = (unsigned char *)(arp + 1);
	arp_ptr += bond->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4 + bond->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	netdev_dbg(bond->dev, "bond_arp_rcv: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
		   slave->dev->name, bond_slave_state(slave),
		     bond->params.arp_validate, slave_do_arp_validate(bond, slave),
		     &sip, &tip);

	curr_active_slave = rcu_dereference(bond->curr_active_slave);
	curr_arp_slave = rcu_dereference(bond->current_arp_slave);

	/* We 'trust' the received ARP enough to validate it if:
	 *
	 * (a) the slave receiving the ARP is active (which includes the
	 * current ARP slave, if any), or
	 *
	 * (b) the receiving slave isn't active, but there is a currently
	 * active slave and it received valid arp reply(s) after it became
	 * the currently active slave, or
	 *
	 * (c) there is an ARP slave that sent an ARP during the prior ARP
	 * interval, and we receive an ARP reply on any slave.  We accept
	 * these because switch FDB update delays may deliver the ARP
	 * reply to a slave other than the sender of the ARP request.
	 *
	 * Note: for (b), backup slaves are receiving the broadcast ARP
	 * request, not a reply.  This request passes from the sending
	 * slave through the L2 switch(es) to the receiving slave.  Since
	 * this is checking the request, sip/tip are swapped for
	 * validation.
	 *
	 * This is done to avoid endless looping when we can't reach the
	 * arp_ip_target and fool ourselves with our own arp requests.
	 */
	if (bond_is_active_slave(slave))
		bond_validate_arp(bond, slave, sip, tip);
	else if (curr_active_slave &&
		 time_after(slave_last_rx(bond, curr_active_slave),
			    curr_active_slave->last_link_up))
		bond_validate_arp(bond, slave, tip, sip);
	else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
		 bond_time_in_interval(bond,
				       dev_trans_start(curr_arp_slave->dev), 1))
		bond_validate_arp(bond, slave, sip, tip);

out_unlock:
	if (arp != (struct arphdr *)skb->data)
		kfree(arp);
	return RX_HANDLER_ANOTHER;
}

/* function to verify if we're in the arp_interval timeslice, returns true if
 * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
 * arp_interval/2) . the arp_interval/2 is needed for really fast networks.
 */
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
				  int mod)
{
	int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);

	return time_in_range(jiffies,
			     last_act - delta_in_ticks,
			     last_act + mod * delta_in_ticks + delta_in_ticks/2);
}

/* This function is called regularly to monitor each slave's link
 * ensuring that traffic is being sent and received when arp monitoring
 * is used in load-balancing mode. if the adapter has been dormant, then an
 * arp is transmitted to generate traffic. see activebackup_arp_monitor for
 * arp monitoring in active backup mode.
 */
static void bond_loadbalance_arp_mon(struct bonding *bond)
{
	struct slave *slave, *oldcurrent;
	struct list_head *iter;
	int do_failover = 0, slave_state_changed = 0;

	if (!bond_has_slaves(bond))
		goto re_arm;

	rcu_read_lock();

	oldcurrent = rcu_dereference(bond->curr_active_slave);
	/* see if any of the previous devices are up now (i.e. they have
	 * xmt and rcv traffic). the curr_active_slave does not come into
	 * the picture unless it is null. also, slave->last_link_up is not
	 * needed here because we send an arp on each slave and give a slave
	 * as long as it needs to get the tx/rx within the delta.
	 * TODO: what about up/down delay in arp mode? it wasn't here before
	 *       so it can wait
	 */
	bond_for_each_slave_rcu(bond, slave, iter) {
		unsigned long trans_start = dev_trans_start(slave->dev);

		slave->new_link = BOND_LINK_NOCHANGE;

		if (slave->link != BOND_LINK_UP) {
			if (bond_time_in_interval(bond, trans_start, 1) &&
			    bond_time_in_interval(bond, slave->last_rx, 1)) {

				slave->new_link = BOND_LINK_UP;
				slave_state_changed = 1;

				/* primary_slave has no meaning in round-robin
				 * mode. the window of a slave being up and
				 * curr_active_slave being null after enslaving
				 * is closed.
				 */
				if (!oldcurrent) {
					netdev_info(bond->dev, "link status definitely up for interface %s\n",
						    slave->dev->name);
					do_failover = 1;
				} else {
					netdev_info(bond->dev, "interface %s is now up\n",
						    slave->dev->name);
				}
			}
		} else {
			/* slave->link == BOND_LINK_UP */

			/* not all switches will respond to an arp request
			 * when the source ip is 0, so don't take the link down
			 * if we don't know our ip yet
			 */
			if (!bond_time_in_interval(bond, trans_start, 2) ||
			    !bond_time_in_interval(bond, slave->last_rx, 2)) {

				slave->new_link = BOND_LINK_DOWN;
				slave_state_changed = 1;
				if (slave->link_failure_count < UINT_MAX)
					slave->link_failure_count++;

				netdev_info(bond->dev, "interface %s is now down\n",
					    slave->dev->name);
				if (slave == oldcurrent)
					do_failover = 1;
			}
		}

		/* note: if switch is in round-robin mode, all links
		 * must tx arp to ensure all links rx an arp - otherwise
		 * links may oscillate or not come up at all; if switch is
		 * in something like xor mode, there is nothing we can
		 * do - all replies will be rx'ed on same link causing slaves
		 * to be unstable during low/no traffic periods
		 */
		if (bond_slave_is_up(slave))
			bond_arp_send_all(bond, slave);
	}

	rcu_read_unlock();

	if (do_failover || slave_state_changed) {
		if (!rtnl_trylock())
			goto re_arm;
		bond_for_each_slave(bond, slave, iter) {
			if (slave->new_link != BOND_LINK_NOCHANGE)
				slave->link = slave->new_link;
		}

		if (slave_state_changed) {
			bond_slave_state_change(bond);
			if (BOND_MODE(bond) == BOND_MODE_XOR)
				bond_update_slave_arr(bond, NULL);
		}
		if (do_failover) {
			block_netpoll_tx();
			bond_select_active_slave(bond);
			unblock_netpoll_tx();
		}
		rtnl_unlock();
	}

re_arm:
	if (bond->params.arp_interval)
		queue_delayed_work(bond->wq, &bond->arp_work,
				   msecs_to_jiffies(bond->params.arp_interval));
}

/* Called to inspect slaves for active-backup mode ARP monitor link state
 * changes.  Sets new_link in slaves to specify what action should take
 * place for the slave.  Returns 0 if no changes are found, >0 if changes
 * to link states must be committed.
 *
 * Called with rcu_read_lock held.
 */
static int bond_ab_arp_inspect(struct bonding *bond)
{
	unsigned long trans_start, last_rx;
	struct list_head *iter;
	struct slave *slave;
	int commit = 0;

	bond_for_each_slave_rcu(bond, slave, iter) {
		slave->new_link = BOND_LINK_NOCHANGE;
		last_rx = slave_last_rx(bond, slave);
		if (slave->link != BOND_LINK_UP) {
			if (bond_time_in_interval(bond, last_rx, 1)) {
				slave->new_link = BOND_LINK_UP;
				commit++;
			}
			continue;
		}
		/* Give slaves 2*delta after being enslaved or made
		 * active.  This avoids bouncing, as the last receive
		 * times need a full ARP monitor cycle to be updated.
		 */
		if (bond_time_in_interval(bond, slave->last_link_up, 2))
			continue;

		/* Backup slave is down if:
		 * - No current_arp_slave AND
		 * - more than 3*delta since last receive AND
		 * - the bond has an IP address
		 *
		 * Note: a non-null current_arp_slave indicates
		 * the curr_active_slave went down and we are
		 * searching for a new one; under this condition
		 * we only take the curr_active_slave down - this
		 * gives each slave a chance to tx/rx traffic
		 * before being taken out
		 */
		if (!bond_is_active_slave(slave) &&
		    !rcu_access_pointer(bond->current_arp_slave) &&
		    !bond_time_in_interval(bond, last_rx, 3)) {
			slave->new_link = BOND_LINK_DOWN;
			commit++;
		}

		/* Active slave is down if:
		 * - more than 2*delta since transmitting OR
		 * - (more than 2*delta since receive AND
		 *    the bond has an IP address)
		 */
		trans_start = dev_trans_start(slave->dev);
		if (bond_is_active_slave(slave) &&
		    (!bond_time_in_interval(bond, trans_start, 2) ||
		     !bond_time_in_interval(bond, last_rx, 2))) {
			slave->new_link = BOND_LINK_DOWN;
			commit++;
		}
	}

	return commit;
}
/* Called to commit link state changes noted by inspection step of
 * active-backup mode ARP monitor.
 *
 * Called with RTNL held.
 */
static void bond_ab_arp_commit(struct bonding *bond)
{
	unsigned long trans_start;
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter) {
		switch (slave->new_link) {
		case BOND_LINK_NOCHANGE:
			continue;
		case BOND_LINK_UP:
2825
			trans_start = dev_trans_start(slave->dev);
			if (rtnl_dereference(bond->curr_active_slave) != slave ||
			    (!rtnl_dereference(bond->curr_active_slave) &&
			     bond_time_in_interval(bond, trans_start, 1))) {
				struct slave *current_arp_slave;

				current_arp_slave = rtnl_dereference(bond->current_arp_slave);
				bond_set_slave_link_state(slave, BOND_LINK_UP,
							  BOND_SLAVE_NOTIFY_NOW);
				if (current_arp_slave) {
					bond_set_slave_inactive_flags(
						current_arp_slave,
						BOND_SLAVE_NOTIFY_NOW);
					RCU_INIT_POINTER(bond->current_arp_slave, NULL);
				}
				netdev_info(bond->dev, "link status definitely up for interface %s\n",
					    slave->dev->name);
				if (!rtnl_dereference(bond->curr_active_slave) ||
2845
				    slave == rtnl_dereference(bond->primary_slave))
2846
					goto do_failover;
			}
			continue;
		case BOND_LINK_DOWN:
			if (slave->link_failure_count < UINT_MAX)
				slave->link_failure_count++;

			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_NOW);
			bond_set_slave_inactive_flags(slave,
						      BOND_SLAVE_NOTIFY_NOW);
			netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n",
				    slave->dev->name);
			if (slave == rtnl_dereference(bond->curr_active_slave)) {
2865
				RCU_INIT_POINTER(bond->current_arp_slave, NULL);
2866
				goto do_failover;
			}

			continue;

		default:
			netdev_err(bond->dev, "impossible: new_link %d on slave %s\n",
				   slave->new_link, slave->dev->name);
			continue;
		}

do_failover:
		block_netpoll_tx();
		bond_select_active_slave(bond);
		unblock_netpoll_tx();
	}
	bond_set_carrier(bond);
}

/* Send ARP probes for active-backup mode ARP monitor.
 *
 * Called with rcu_read_lock held.
 */
static bool bond_ab_arp_probe(struct bonding *bond)
{
	struct slave *slave, *before = NULL, *new_slave = NULL,
		     *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
		     *curr_active_slave = rcu_dereference(bond->curr_active_slave);
	struct list_head *iter;
	bool found = false;
	bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
	if (curr_arp_slave && curr_active_slave)
		netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n",
			    curr_arp_slave->dev->name,
			    curr_active_slave->dev->name);
	if (curr_active_slave) {
		bond_arp_send_all(bond, curr_active_slave);
		return should_notify_rtnl;
	}
	/* if we don't have a curr_active_slave, search for the next available
	 * backup slave from the current_arp_slave and make it the candidate
	 * for becoming the curr_active_slave
	 */
	if (!curr_arp_slave) {
		curr_arp_slave = bond_first_slave_rcu(bond);
		if (!curr_arp_slave)
			return should_notify_rtnl;
	}
	bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
	bond_for_each_slave_rcu(bond, slave, iter) {
		if (!found && !before && bond_slave_is_up(slave))
			before = slave;
		if (found && !new_slave && bond_slave_is_up(slave))
			new_slave = slave;
		/* if the link state is up at this point, we
		 * mark it down - this can happen if we have
		 * simultaneous link failures and
		 * reselect_active_interface doesn't make this
		 * one the current slave so it is still marked
		 * up when it is actually down
		 */
		if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_LATER);
			if (slave->link_failure_count < UINT_MAX)
				slave->link_failure_count++;
			bond_set_slave_inactive_flags(slave,
						      BOND_SLAVE_NOTIFY_LATER);
			netdev_info(bond->dev, "backup interface %s is now down\n",
				    slave->dev->name);
		}
		if (slave == curr_arp_slave)
			found = true;
	}

	if (!new_slave && before)
		new_slave = before;

	if (!new_slave)
		goto check_state;
	bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
				  BOND_SLAVE_NOTIFY_LATER);
	bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
	bond_arp_send_all(bond, new_slave);
	new_slave->last_link_up = jiffies;
	rcu_assign_pointer(bond->current_arp_slave, new_slave);
check_state:
	bond_for_each_slave_rcu(bond, slave, iter) {
		if (slave->should_notify || slave->should_notify_link) {
			should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
			break;
		}
	}
	return should_notify_rtnl;
}
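/* Active-backup mode ARP monitor: inspect slaves under RCU, commit link state
 * changes under RTNL, then probe via the current ARP slave and send any
 * deferred peer/rtnl notifications.
 */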
static void bond_activebackup_arp_mon(struct bonding *bond)
{
	bool should_notify_peers = false;
	bool should_notify_rtnl = false;
	int delta_in_ticks;
	delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);

	if (!bond_has_slaves(bond))
		goto re_arm;

	rcu_read_lock();
	should_notify_peers = bond_should_notify_peers(bond);

	if (bond_ab_arp_inspect(bond)) {
		rcu_read_unlock();

		/* Race avoidance with bond_close flush of workqueue */
		if (!rtnl_trylock()) {
			delta_in_ticks = 1;
			should_notify_peers = false;
			goto re_arm;
		}
		bond_ab_arp_commit(bond);
		rtnl_unlock();
		rcu_read_lock();
	}

	should_notify_rtnl = bond_ab_arp_probe(bond);
	rcu_read_unlock();
re_arm:
	if (bond->params.arp_interval)
		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);

	if (should_notify_peers || should_notify_rtnl) {
		if (!rtnl_trylock())
			return;

		if (should_notify_peers)
			call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
						 bond->dev);
		if (should_notify_rtnl) {
			bond_slave_state_notify(bond);
			bond_slave_link_notify(bond);
		}
		rtnl_unlock();
	}
}

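/* Dispatch the periodic ARP monitor work to the mode-specific handler */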
static void bond_arp_monitor(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    arp_work.work);

	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
		bond_activebackup_arp_mon(bond);
	else
		bond_loadbalance_arp_mon(bond);
}

/*-------------------------- netdev event handling --------------------------*/

/* Change device name */
static int bond_event_changename(struct bonding *bond)
{
	bond_remove_proc_entry(bond);
	bond_create_proc_entry(bond);

	bond_debug_reregister(bond);

	return NOTIFY_DONE;
}

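/* Handle netdev notifier events for the bonding master device itself */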
static int bond_master_netdev_event(unsigned long event,
				    struct net_device *bond_dev)
{
	struct bonding *event_bond = netdev_priv(bond_dev);

	switch (event) {
	case NETDEV_CHANGENAME:
		return bond_event_changename(event_bond);
	case NETDEV_UNREGISTER:
		bond_remove_proc_entry(event_bond);
		break;
	case NETDEV_REGISTER:
		bond_create_proc_entry(event_bond);
		break;
	case NETDEV_NOTIFY_PEERS:
		if (event_bond->send_peer_notif)
			event_bond->send_peer_notif--;
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

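/* Handle netdev notifier events for devices enslaved to a bond */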
static int bond_slave_netdev_event(unsigned long event,
				   struct net_device *slave_dev)
{
	struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary;
	struct bonding *bond;
	struct net_device *bond_dev;
	/* A netdev event can be generated while enslaving a device
	 * before netdev_rx_handler_register is called in which case
	 * slave will be NULL
	 */
	if (!slave)
		return NOTIFY_DONE;
	bond_dev = slave->bond->dev;
	bond = slave->bond;
	primary = rtnl_dereference(bond->primary_slave);
	switch (event) {
	case NETDEV_UNREGISTER:
		if (bond_dev->type != ARPHRD_ETHER)
			bond_release_and_destroy(bond_dev, slave_dev);
		else
			__bond_release_one(bond_dev, slave_dev, false, true);
		break;
	case NETDEV_UP:
	case NETDEV_CHANGE:
		/* For 802.3ad mode only:
		 * Getting invalid Speed/Duplex values here will put slave
		 * in weird state. So mark it as link-down for the time
		 * being and let link-monitoring (miimon) set it right when
		 * correct speeds/duplex are available.
		 */
		if (bond_update_speed_duplex(slave) &&
		    BOND_MODE(bond) == BOND_MODE_8023AD)
			slave->link = BOND_LINK_DOWN;

		if (BOND_MODE(bond) == BOND_MODE_8023AD)
			bond_3ad_adapter_speed_duplex_changed(slave);
		/* Fallthrough */
	case NETDEV_DOWN:
		/* Refresh slave-array if applicable!
		 * If the setup does not use miimon or arpmon (mode-specific!),
		 * then these events will not cause the slave-array to be
		 * refreshed. This will cause xmit to use a slave that is not
		 * usable. Avoid such a situation by refreshing the array at these
		 * events. If these (miimon/arpmon) parameters are configured
		 * then array gets refreshed twice and that should be fine!
		 */
		if (bond_mode_can_use_xmit_hash(bond))
			bond_update_slave_arr(bond, NULL);
		break;
	case NETDEV_CHANGEMTU:
		/* TODO: Should slaves be allowed to
		 * independently alter their MTU?  For
		 * an active-backup bond, slaves need
		 * not be the same type of device, so
		 * MTUs may vary.  For other modes,
		 * slaves arguably should have the
		 * same MTUs. To do this, we'd need to
		 * take over the slave's change_mtu
		 * function for the duration of their
		 * servitude.
		 */
		break;
	case NETDEV_CHANGENAME:
		/* we don't care if we don't have primary set */
		if (!bond_uses_primary(bond) ||
		    !bond->params.primary[0])
			break;

		if (slave == primary) {
			/* slave's name changed - it's no longer the primary */
			RCU_INIT_POINTER(bond->primary_slave, NULL);
		} else if (!strcmp(slave_dev->name, bond->params.primary)) {
			/* we have a new primary slave */
			rcu_assign_pointer(bond->primary_slave, slave);
		} else { /* we didn't change primary - exit */
			break;
		}

		netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n",
			    primary ? slave_dev->name : "none");

		block_netpoll_tx();
		bond_select_active_slave(bond);
		unblock_netpoll_tx();
		break;
	case NETDEV_FEAT_CHANGE:
		bond_compute_features(bond);
		break;
	case NETDEV_RESEND_IGMP:
		/* Propagate to master device */
		call_netdevice_notifiers(event, slave->bond->dev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

/* bond_netdev_event: handle netdev notifier chain events.
 *
 * This function receives events for the netdev chain.  The caller (an
 * ioctl handler calling blocking_notifier_call_chain) holds the necessary
 * locks for us to safely manipulate the slave devices (RTNL lock,
 * dev_probe_lock).
 */
static int bond_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	netdev_dbg(event_dev, "event: %lx\n", event);

	if (!(event_dev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	if (event_dev->flags & IFF_MASTER) {
		netdev_dbg(event_dev, "IFF_MASTER\n");
		return bond_master_netdev_event(event, event_dev);
	}

	if (event_dev->flags & IFF_SLAVE) {
		netdev_dbg(event_dev, "IFF_SLAVE\n");
		return bond_slave_netdev_event(event, event_dev);
	}

	return NOTIFY_DONE;
}

static struct notifier_block bond_netdev_notifier = {
	.notifier_call = bond_netdev_event,
};

/*---------------------------- Hashing Policies -----------------------------*/

/* L2 hash helper */
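/* Note (illustrative): only the last octet of each MAC address and the raw
 * ethertype feed this hash, so with the layer2 policy all traffic between one
 * pair of hosts maps onto a single slave.
 */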
static inline u32 bond_eth_hash(struct sk_buff *skb)
{
	struct ethhdr *ep, hdr_tmp;

	ep = skb_header_pointer(skb, 0, sizeof(hdr_tmp), &hdr_tmp);
	if (ep)
		return ep->h_dest[5] ^ ep->h_source[5] ^ ep->h_proto;
	return 0;
}

/* Extract the appropriate headers based on bond's xmit policy */
static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
			      struct flow_keys *fk)
{
	const struct ipv6hdr *iph6;
	const struct iphdr *iph;
	int noff, proto = -1;

	if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
		return skb_flow_dissect_flow_keys(skb, fk, 0);

	fk->ports.ports = 0;
	noff = skb_network_offset(skb);
	if (skb->protocol == htons(ETH_P_IP)) {
		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
			return false;
		iph = ip_hdr(skb);
		iph_to_flow_copy_v4addrs(fk, iph);
		noff += iph->ihl << 2;
		if (!ip_is_fragment(iph))
			proto = iph->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
			return false;
		iph6 = ipv6_hdr(skb);
		iph_to_flow_copy_v6addrs(fk, iph6);
		noff += sizeof(*iph6);
		proto = iph6->nexthdr;
	} else {
		return false;
	}
	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
		fk->ports.ports = skb_flow_get_ports(skb, noff, proto);

	return true;
}

/**
 * bond_xmit_hash - generate a hash value based on the xmit policy
 * @bond: bonding device
 * @skb: buffer to use for headers
 *
 * This function will extract the necessary headers from the skb buffer and use
 * them to generate a hash based on the xmit_policy set in the bonding device
3269
 */
3270
u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
3271
{
3272 3273
	struct flow_keys flow;
	u32 hash;
3274

E
	    skb->l4_hash)
		return skb->hash;

3279 3280
	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
	    !bond_flow_dissect(bond, skb, &flow))
3281
		return bond_eth_hash(skb);
3282

3283 3284 3285 3286
	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
	    bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
		hash = bond_eth_hash(skb);
	else
3287
		hash = (__force u32)flow.ports.ports;
3288 3289
	hash ^= (__force u32)flow_get_u32_dst(&flow) ^
		(__force u32)flow_get_u32_src(&flow);
3290 3291 3292
	hash ^= (hash >> 16);
	hash ^= (hash >> 8);

3293
	return hash >> 1;
3294 3295
}

L
/*-------------------------- Device entry points ----------------------------*/

3298
void bond_work_init_all(struct bonding *bond)
3299 3300 3301 3302 3303
{
	INIT_DELAYED_WORK(&bond->mcast_work,
			  bond_resend_igmp_join_requests_delayed);
	INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
	INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
3304
	INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);
3305
	INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
3306
	INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
3307 3308 3309 3310 3311 3312 3313 3314 3315
}

static void bond_work_cancel_all(struct bonding *bond)
{
	cancel_delayed_work_sync(&bond->mii_work);
	cancel_delayed_work_sync(&bond->arp_work);
	cancel_delayed_work_sync(&bond->alb_work);
	cancel_delayed_work_sync(&bond->ad_work);
	cancel_delayed_work_sync(&bond->mcast_work);
3316
	cancel_delayed_work_sync(&bond->slave_arr_work);
3317 3318
}

L
static int bond_open(struct net_device *bond_dev)
{
3321
	struct bonding *bond = netdev_priv(bond_dev);
3322
	struct list_head *iter;
3323
	struct slave *slave;
L

3325
	/* reset slave->backup and slave->inactive */
3326
	if (bond_has_slaves(bond)) {
3327
		bond_for_each_slave(bond, slave, iter) {
3328 3329
			if (bond_uses_primary(bond) &&
			    slave != rcu_access_pointer(bond->curr_active_slave)) {
3330 3331
				bond_set_slave_inactive_flags(slave,
							      BOND_SLAVE_NOTIFY_NOW);
3332
			} else if (BOND_MODE(bond) != BOND_MODE_8023AD) {
3333 3334
				bond_set_slave_active_flags(slave,
							    BOND_SLAVE_NOTIFY_NOW);
3335 3336 3337 3338
			}
		}
	}

3339
	if (bond_is_lb(bond)) {
L
		/* bond_alb_initialize must be called before the timer
		 * is started.
		 */
3343
		if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
3344
			return -ENOMEM;
3345
		if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB)
3346
			queue_delayed_work(bond->wq, &bond->alb_work, 0);
L
	}

3349
	if (bond->params.miimon)  /* link check interval, in milliseconds. */
3350
		queue_delayed_work(bond->wq, &bond->mii_work, 0);
L

	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
3353
		queue_delayed_work(bond->wq, &bond->arp_work, 0);
3354
		bond->recv_probe = bond_arp_rcv;
L
	}

3357
	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
3358
		queue_delayed_work(bond->wq, &bond->ad_work, 0);
L
		/* register to receive LACPDUs */
3360
		bond->recv_probe = bond_3ad_lacpdu_recv;
3361
		bond_3ad_initiate_agg_selection(bond, 1);
L
	}

3364
	if (bond_mode_can_use_xmit_hash(bond))
3365 3366
		bond_update_slave_arr(bond, NULL);

L
	return 0;
}

static int bond_close(struct net_device *bond_dev)
{
3372
	struct bonding *bond = netdev_priv(bond_dev);
L

3374
	bond_work_cancel_all(bond);
3375
	bond->send_peer_notif = 0;
3376
	if (bond_is_lb(bond))
L
		bond_alb_deinitialize(bond);
3378
	bond->recv_probe = NULL;
L

	return 0;
}

E
/* fold stats, assuming all rtnl_link_stats64 fields are u64, but
 * that some drivers can provide 32bit values only.
 */
static void bond_fold_stats(struct rtnl_link_stats64 *_res,
			    const struct rtnl_link_stats64 *_new,
			    const struct rtnl_link_stats64 *_old)
{
	const u64 *new = (const u64 *)_new;
	const u64 *old = (const u64 *)_old;
	u64 *res = (u64 *)_res;
	int i;

	for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
		u64 nv = new[i];
		u64 ov = old[i];
		s64 delta = nv - ov;

		/* detects if this particular field is 32bit only */
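		/* e.g. a 32-bit counter wrapping from 0xffffff00 to 0x100
		 * then yields a small positive delta (512) instead of a
		 * huge negative one.
		 */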
		if (((nv | ov) >> 32) == 0)
			delta = (s64)(s32)((u32)nv - (u32)ov);

		/* filter anomalies, some drivers reset their stats
		 * at down/up events.
		 */
		if (delta > 0)
			res[i] += delta;
	}
}

static int bond_get_nest_level(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	return bond->nest_level;
}

static void bond_get_stats(struct net_device *bond_dev,
			   struct rtnl_link_stats64 *stats)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct rtnl_link_stats64 temp;
	struct list_head *iter;
	struct slave *slave;

	spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev));
	memcpy(stats, &bond->bond_stats, sizeof(*stats));

	rcu_read_lock();
	bond_for_each_slave_rcu(bond, slave, iter) {
		const struct rtnl_link_stats64 *new =
			dev_get_stats(slave->dev, &temp);

		bond_fold_stats(stats, new, &slave->slave_stats);

		/* save off the slave stats for the next run */
		memcpy(&slave->slave_stats, new, sizeof(*new));
	}
	rcu_read_unlock();

	memcpy(&bond->bond_stats, stats, sizeof(*stats));
	spin_unlock(&bond->stats_lock);
}

static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct net_device *slave_dev = NULL;
	struct ifbond k_binfo;
	struct ifbond __user *u_binfo = NULL;
	struct ifslave k_sinfo;
	struct ifslave __user *u_sinfo = NULL;
	struct mii_ioctl_data *mii = NULL;
	struct bond_opt_value newval;
	struct net *net;
	int res = 0;

	netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);

	switch (cmd) {
	case SIOCGMIIPHY:
		mii = if_mii(ifr);
		if (!mii)
			return -EINVAL;

		mii->phy_id = 0;
		/* Fall Through */
	case SIOCGMIIREG:
		/* We do this again just in case we were called by SIOCGMIIREG
		 * instead of SIOCGMIIPHY.
		 */
		mii = if_mii(ifr);
		if (!mii)
			return -EINVAL;

		if (mii->reg_num == 1) {
			mii->val_out = 0;
			if (netif_carrier_ok(bond->dev))
				mii->val_out = BMSR_LSTATUS;
		}

		return 0;
	case BOND_INFO_QUERY_OLD:
	case SIOCBONDINFOQUERY:
		u_binfo = (struct ifbond __user *)ifr->ifr_data;

		if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
			return -EFAULT;

		bond_info_query(bond_dev, &k_binfo);
		if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
			return -EFAULT;

		return 0;
	case BOND_SLAVE_INFO_QUERY_OLD:
	case SIOCBONDSLAVEINFOQUERY:
		u_sinfo = (struct ifslave __user *)ifr->ifr_data;

		if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave)))
			return -EFAULT;

		res = bond_slave_info_query(bond_dev, &k_sinfo);
		if (res == 0 &&
		    copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave)))
			return -EFAULT;

		return res;
	default:
		break;
	}

	net = dev_net(bond_dev);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	slave_dev = __dev_get_by_name(net, ifr->ifr_slave);

	netdev_dbg(bond_dev, "slave_dev=%p:\n", slave_dev);

	if (!slave_dev)
		return -ENODEV;

	netdev_dbg(bond_dev, "slave_dev->name=%s:\n", slave_dev->name);
	switch (cmd) {
	case BOND_ENSLAVE_OLD:
	case SIOCBONDENSLAVE:
		res = bond_enslave(bond_dev, slave_dev, NULL);
		break;
	case BOND_RELEASE_OLD:
	case SIOCBONDRELEASE:
		res = bond_release(bond_dev, slave_dev);
		break;
	case BOND_SETHWADDR_OLD:
	case SIOCBONDSETHWADDR:
		bond_set_dev_addr(bond_dev, slave_dev);
		res = 0;
		break;
	case BOND_CHANGE_ACTIVE_OLD:
	case SIOCBONDCHANGEACTIVE:
		bond_opt_initstr(&newval, slave_dev->name);
		res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE,
					    &newval);
		break;
	default:
		res = -EOPNOTSUPP;
	}

	return res;
}

static void bond_change_rx_flags(struct net_device *bond_dev, int change)
{
	struct bonding *bond = netdev_priv(bond_dev);

	if (change & IFF_PROMISC)
		bond_set_promiscuity(bond,
				     bond_dev->flags & IFF_PROMISC ? 1 : -1);

	if (change & IFF_ALLMULTI)
		bond_set_allmulti(bond,
				  bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
}

static void bond_set_rx_mode(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	rcu_read_lock();
	if (bond_uses_primary(bond)) {
		slave = rcu_dereference(bond->curr_active_slave);
		if (slave) {
			dev_uc_sync(slave->dev, bond_dev);
			dev_mc_sync(slave->dev, bond_dev);
		}
	} else {
		bond_for_each_slave_rcu(bond, slave, iter) {
			dev_uc_sync_multiple(slave->dev, bond_dev);
			dev_mc_sync_multiple(slave->dev, bond_dev);
		}
	}
	rcu_read_unlock();
}

static int bond_neigh_init(struct neighbour *n)
{
	struct bonding *bond = netdev_priv(n->dev);
	const struct net_device_ops *slave_ops;
	struct neigh_parms parms;
	struct slave *slave;
	int ret;

	slave = bond_first_slave(bond);
	if (!slave)
		return 0;
	slave_ops = slave->dev->netdev_ops;
	if (!slave_ops->ndo_neigh_setup)
		return 0;

	parms.neigh_setup = NULL;
	parms.neigh_cleanup = NULL;
	ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
	if (ret)
		return ret;

	/* Assign slave's neigh_cleanup to neighbour in case cleanup is called
	 * after the last slave has been detached.  Assumes that all slaves
	 * utilize the same neigh_cleanup (true at this writing as only user
	 * is ipoib).
	 */
	n->parms->neigh_cleanup = parms.neigh_cleanup;

	if (!parms.neigh_setup)
		return 0;

	return parms.neigh_setup(n);
}

/* The bonding ndo_neigh_setup is called at init time before any
 * slave exists. So we must declare proxy setup function which will
 * be used at run time to resolve the actual slave neigh param setup.
 *
 * It's also called by master devices (such as vlans) to setup their
 * underlying devices. In that case - do nothing, we're already set up from
 * our init.
 */
static int bond_neigh_setup(struct net_device *dev,
			    struct neigh_parms *parms)
{
	/* modify only our neigh_parms */
	if (parms->dev == dev)
		parms->neigh_setup = bond_neigh_init;

	return 0;
}

/* Change the MTU of all of a master's slaves to match the master */
static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *rollback_slave;
	struct list_head *iter;
	int res = 0;

	netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu);

	bond_for_each_slave(bond, slave, iter) {
		netdev_dbg(bond_dev, "s %p c_m %p\n",
			   slave, slave->dev->netdev_ops->ndo_change_mtu);

		res = dev_set_mtu(slave->dev, new_mtu);

		if (res) {
			/* If we failed to set the slave's mtu to the new value
			 * we must abort the operation even in ACTIVE_BACKUP
			 * mode, because if we allow the backup slaves to have
			 * different mtu values than the active slave we'll
			 * need to change their mtu when doing a failover. That
			 * means changing their mtu from timer context, which
			 * is probably not a good idea.
			 */
			netdev_dbg(bond_dev, "err %d %s\n", res,
				   slave->dev->name);
			goto unwind;
		}
	}

	bond_dev->mtu = new_mtu;

	return 0;

unwind:
	/* unwind from head to the slave that failed */
	bond_for_each_slave(bond, rollback_slave, iter) {
		int tmp_res;

		if (rollback_slave == slave)
			break;

		tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
		if (tmp_res) {
			netdev_dbg(bond_dev, "unwind err %d dev %s\n",
				   tmp_res, rollback_slave->dev->name);
		}
	}

	return res;
}

/* Change HW address
 *
 * Note that many devices must be down to change the HW address, and
 * downing the master releases all slaves.  We can make bonds full of
 * bonding devices to test this, however.
 */
static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *rollback_slave;
	struct sockaddr_storage *ss = addr, tmp_ss;
	struct list_head *iter;
	int res = 0;

	if (BOND_MODE(bond) == BOND_MODE_ALB)
		return bond_alb_set_mac_address(bond_dev, addr);


	netdev_dbg(bond_dev, "bond=%p\n", bond);

	/* If fail_over_mac is enabled, do nothing and return success.
	 * Returning an error causes ifenslave to fail.
	 */
	if (bond->params.fail_over_mac &&
	    BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
		return 0;

	if (!is_valid_ether_addr(ss->__data))
		return -EADDRNOTAVAIL;

	bond_for_each_slave(bond, slave, iter) {
		netdev_dbg(bond_dev, "slave %p %s\n", slave, slave->dev->name);
		res = dev_set_mac_address(slave->dev, addr);
		if (res) {
			/* TODO: consider downing the slave
			 * and retry ?
			 * User should expect communications
			 * breakage anyway until ARP finish
			 * updating, so...
			 */
			netdev_dbg(bond_dev, "err %d %s\n", res, slave->dev->name);
			goto unwind;
		}
	}

	/* success */
	memcpy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);
	return 0;

unwind:
	memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
	tmp_ss.ss_family = bond_dev->type;

	/* unwind from head to the slave that failed */
	bond_for_each_slave(bond, rollback_slave, iter) {
		int tmp_res;

		if (rollback_slave == slave)
			break;

		tmp_res = dev_set_mac_address(rollback_slave->dev,
					      (struct sockaddr *)&tmp_ss);
		if (tmp_res) {
			netdev_dbg(bond_dev, "unwind err %d dev %s\n",
				   tmp_res, rollback_slave->dev->name);
		}
	}

	return res;
}

/**
 * bond_xmit_slave_id - transmit skb through slave with slave_id
 * @bond: bonding device that is transmitting
 * @skb: buffer to transmit
 * @slave_id: slave id up to slave_cnt-1 through which to transmit
 *
 * This function tries to transmit through slave with slave_id but in case
 * it fails, it tries to find the first available slave for transmission.
 * The skb is consumed in all cases, thus the function is void.
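 *
 * e.g. with slave_id == 2 transmission is first attempted on the third slave
 * in the list and then on each following one; if none can transmit, the walk
 * wraps back to the head and retries slaves 0 .. slave_id - 1 before dropping.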
 */
static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
{
	struct list_head *iter;
	struct slave *slave;
	int i = slave_id;

	/* Here we start from the slave with slave_id */
	bond_for_each_slave_rcu(bond, slave, iter) {
		if (--i < 0) {
			if (bond_slave_can_tx(slave)) {
				bond_dev_queue_xmit(bond, skb, slave->dev);
				return;
			}
		}
	}

	/* Here we start from the first slave up to slave_id */
	i = slave_id;
	bond_for_each_slave_rcu(bond, slave, iter) {
		if (--i < 0)
			break;
		if (bond_slave_can_tx(slave)) {
			bond_dev_queue_xmit(bond, skb, slave->dev);
			return;
		}
	}
	/* no slave that can tx has been found */
	bond_tx_drop(bond->dev, skb);
}

/**
 * bond_rr_gen_slave_id - generate slave id based on packets_per_slave
 * @bond: bonding device to use
 *
 * Based on the value of the bonding device's packets_per_slave parameter
 * this function generates a slave id, which is usually used as the next
 * slave to transmit through.
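 *
 * e.g. packets_per_slave == 0 draws a fresh pseudo-random id per packet,
 * == 1 simply follows rr_tx_counter, and larger values keep the same id for
 * that many consecutive packets (via reciprocal_divide of the counter).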
 */
static u32 bond_rr_gen_slave_id(struct bonding *bond)
{
	u32 slave_id;
	struct reciprocal_value reciprocal_packets_per_slave;
	int packets_per_slave = bond->params.packets_per_slave;

	switch (packets_per_slave) {
	case 0:
		slave_id = prandom_u32();
		break;
	case 1:
		slave_id = bond->rr_tx_counter;
		break;
	default:
		reciprocal_packets_per_slave =
			bond->params.reciprocal_packets_per_slave;
		slave_id = reciprocal_divide(bond->rr_tx_counter,
					     reciprocal_packets_per_slave);
		break;
	}
	bond->rr_tx_counter++;

	return slave_id;
}

3837 3838
static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
					struct net_device *bond_dev)
L
Linus Torvalds 已提交
3839
{
3840
	struct bonding *bond = netdev_priv(bond_dev);
3841
	struct iphdr *iph = ip_hdr(skb);
3842
	struct slave *slave;
3843
	u32 slave_id;
L
Linus Torvalds 已提交
3844

3845
	/* Start with the curr_active_slave that joined the bond as the
3846 3847 3848 3849
	 * default for sending IGMP traffic.  For failover purposes one
	 * needs to maintain some consistency for the interface that will
	 * send the join/membership reports.  The curr_active_slave found
	 * will send all of this type of traffic.
3850
	 */
3851
	if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
3852
		slave = rcu_dereference(bond->curr_active_slave);
3853
		if (slave)
3854 3855 3856
			bond_dev_queue_xmit(bond, skb, slave->dev);
		else
			bond_xmit_slave_id(bond, skb, 0);
3857
	} else {
3858
		int slave_cnt = READ_ONCE(bond->slave_cnt);
3859 3860 3861 3862 3863

		if (likely(slave_cnt)) {
			slave_id = bond_rr_gen_slave_id(bond);
			bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
		} else {
E
Eric Dumazet 已提交
3864
			bond_tx_drop(bond_dev, skb);
3865
		}
L
Linus Torvalds 已提交
3866
	}
3867

3868
	return NETDEV_TX_OK;
L
Linus Torvalds 已提交
3869 3870
}

3871
/* In active-backup mode, we know that bond->curr_active_slave is always valid if
L
Linus Torvalds 已提交
3872 3873
 * the bond has a usable interface.
 */
3874 3875
static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
					  struct net_device *bond_dev)
L
Linus Torvalds 已提交
3876
{
3877
	struct bonding *bond = netdev_priv(bond_dev);
3878
	struct slave *slave;
L
Linus Torvalds 已提交
3879

3880
	slave = rcu_dereference(bond->curr_active_slave);
3881
	if (slave)
3882 3883
		bond_dev_queue_xmit(bond, skb, slave->dev);
	else
E
Eric Dumazet 已提交
3884
		bond_tx_drop(bond_dev, skb);
3885

3886
	return NETDEV_TX_OK;
L
Linus Torvalds 已提交
3887 3888
}

3889 3890 3891
/* Use this to update slave_array when (a) it's not appropriate to update
 * slave_array right away (note that update_slave_array() may sleep)
 * and / or (b) RTNL is not held.
L
Linus Torvalds 已提交
3892
 */
3893
void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay)
L
Linus Torvalds 已提交
3894
{
3895 3896
	queue_delayed_work(bond->wq, &bond->slave_arr_work, delay);
}
L
Linus Torvalds 已提交
3897

3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923
/* Slave array work handler. Holds only RTNL */
static void bond_slave_arr_handler(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    slave_arr_work.work);
	int ret;

	if (!rtnl_trylock())
		goto err;

	ret = bond_update_slave_arr(bond, NULL);
	rtnl_unlock();
	if (ret) {
		pr_warn_ratelimited("Failed to update slave array from WT\n");
		goto err;
	}
	return;

err:
	bond_slave_arr_work_rearm(bond, 1);
}

/* Build the usable slaves array in control path for modes that use xmit-hash
 * to determine the slave interface -
 * (a) BOND_MODE_8023AD
 * (b) BOND_MODE_XOR
3924
 * (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0
3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976
 *
 * The caller is expected to hold RTNL only and NO other lock!
 */
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
{
	struct slave *slave;
	struct list_head *iter;
	struct bond_up_slave *new_arr, *old_arr;
	int agg_id = 0;
	int ret = 0;

#ifdef CONFIG_LOCKDEP
	WARN_ON(lockdep_is_held(&bond->mode_lock));
#endif

	new_arr = kzalloc(offsetof(struct bond_up_slave, arr[bond->slave_cnt]),
			  GFP_KERNEL);
	if (!new_arr) {
		ret = -ENOMEM;
		pr_err("Failed to build slave-array.\n");
		goto out;
	}
	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		struct ad_info ad_info;

		if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
			pr_debug("bond_3ad_get_active_agg_info failed\n");
			kfree_rcu(new_arr, rcu);
			/* No active aggregator means it's not safe to use
			 * the previous array.
			 */
			old_arr = rtnl_dereference(bond->slave_arr);
			if (old_arr) {
				RCU_INIT_POINTER(bond->slave_arr, NULL);
				kfree_rcu(old_arr, rcu);
			}
			goto out;
		}
		agg_id = ad_info.aggregator_id;
	}
	bond_for_each_slave(bond, slave, iter) {
		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			struct aggregator *agg;

			agg = SLAVE_AD_INFO(slave)->port.aggregator;
			if (!agg || agg->aggregator_identifier != agg_id)
				continue;
		}
		if (!bond_slave_can_tx(slave))
			continue;
		if (skipslave == slave)
			continue;
3977 3978 3979 3980 3981

		netdev_dbg(bond->dev,
			   "Adding slave dev %s to tx hash array[%d]\n",
			   slave->dev->name, new_arr->count);

3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017
		new_arr->arr[new_arr->count++] = slave;
	}

	old_arr = rtnl_dereference(bond->slave_arr);
	rcu_assign_pointer(bond->slave_arr, new_arr);
	if (old_arr)
		kfree_rcu(old_arr, rcu);
out:
	if (ret != 0 && skipslave) {
		int idx;

		/* Rare situation where caller has asked to skip a specific
		 * slave but allocation failed (most likely!). BTW this is
		 * only possible when the call is initiated from
		 * __bond_release_one(). In this situation; overwrite the
		 * skipslave entry in the array with the last entry from the
		 * array to avoid a situation where the xmit path may choose
		 * this to-be-skipped slave to send a packet out.
		 */
		old_arr = rtnl_dereference(bond->slave_arr);
		for (idx = 0; idx < old_arr->count; idx++) {
			if (skipslave == old_arr->arr[idx]) {
				old_arr->arr[idx] =
				    old_arr->arr[old_arr->count-1];
				old_arr->count--;
				break;
			}
		}
	}
	return ret;
}

/* Use this Xmit function for 3AD as well as XOR modes. The current
 * usable slave array is formed in the control path. The xmit function
 * just calculates hash and sends the packet out.
 */
4018 4019
static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
				     struct net_device *dev)
4020 4021 4022 4023 4024 4025 4026
{
	struct bonding *bond = netdev_priv(dev);
	struct slave *slave;
	struct bond_up_slave *slaves;
	unsigned int count;

	slaves = rcu_dereference(bond->slave_arr);
4027
	count = slaves ? READ_ONCE(slaves->count) : 0;
4028 4029 4030 4031
	if (likely(count)) {
		slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
		bond_dev_queue_xmit(bond, skb, slave->dev);
	} else {
E
Eric Dumazet 已提交
4032
		bond_tx_drop(dev, skb);
4033
	}
4034

4035
	return NETDEV_TX_OK;
L
Linus Torvalds 已提交
4036 4037
}

4038
/* in broadcast mode, we send everything to all usable interfaces. */
4039 4040
static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
				       struct net_device *bond_dev)
L
Linus Torvalds 已提交
4041
{
4042
	struct bonding *bond = netdev_priv(bond_dev);
4043
	struct slave *slave = NULL;
4044
	struct list_head *iter;
L
Linus Torvalds 已提交
4045

4046
	bond_for_each_slave_rcu(bond, slave, iter) {
4047 4048
		if (bond_is_last_slave(bond, slave))
			break;
4049
		if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
4050
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
L
Linus Torvalds 已提交
4051

4052
			if (!skb2) {
4053 4054
				net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
						    bond_dev->name, __func__);
4055
				continue;
L
Linus Torvalds 已提交
4056
			}
4057
			bond_dev_queue_xmit(bond, skb2, slave->dev);
L
Linus Torvalds 已提交
4058 4059
		}
	}
4060
	if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
4061 4062
		bond_dev_queue_xmit(bond, skb, slave->dev);
	else
E
Eric Dumazet 已提交
4063
		bond_tx_drop(bond_dev, skb);
S
Stephen Hemminger 已提交
4064

4065
	return NETDEV_TX_OK;
L
Linus Torvalds 已提交
4066 4067 4068 4069
}

/*------------------------- Device initialization ---------------------------*/

4070
/* Lookup the slave that corresponds to a qid */
4071 4072 4073 4074
static inline int bond_slave_override(struct bonding *bond,
				      struct sk_buff *skb)
{
	struct slave *slave = NULL;
4075
	struct list_head *iter;
4076

4077
	if (!skb_rx_queue_recorded(skb))
4078
		return 1;
4079 4080

	/* Find out if any slaves have the same mapping as this skb. */
4081
	bond_for_each_slave_rcu(bond, slave, iter) {
4082
		if (slave->queue_id == skb_get_queue_mapping(skb)) {
4083 4084
			if (bond_slave_is_up(slave) &&
			    slave->link == BOND_LINK_UP) {
4085 4086 4087 4088
				bond_dev_queue_xmit(bond, skb, slave->dev);
				return 0;
			}
			/* If the slave isn't UP, use default transmit policy. */
4089 4090 4091 4092
			break;
		}
	}

4093
	return 1;
4094 4095
}

4096

4097
static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
4098 4099
			     struct net_device *sb_dev,
			     select_queue_fallback_t fallback)
4100
{
4101
	/* This helper function exists to help dev_pick_tx get the correct
	 * destination queue.  Using a helper function skips a call to
	 * skb_tx_hash and will put the skbs in the queue we expect on their
	 * way down to the bonding driver.
	 */
	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

	/* Save the original txq to restore before passing to the driver */
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb);

	if (unlikely(txq >= dev->real_num_tx_queues)) {
		do {
			txq -= dev->real_num_tx_queues;
		} while (txq >= dev->real_num_tx_queues);
	}
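	/* e.g. with 16 real tx queues a packet whose recorded rx queue was
	 * 18 is mapped to tx queue 2 here, while the original mapping was
	 * saved above in qdisc_skb_cb(skb).
	 */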
	return txq;
}

4119
static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
4120
{
4121 4122
	struct bonding *bond = netdev_priv(dev);

4123 4124 4125
	if (bond_should_override_tx_queue(bond) &&
	    !bond_slave_override(bond, skb))
		return NETDEV_TX_OK;
4126

4127
	switch (BOND_MODE(bond)) {
4128 4129 4130 4131
	case BOND_MODE_ROUNDROBIN:
		return bond_xmit_roundrobin(skb, dev);
	case BOND_MODE_ACTIVEBACKUP:
		return bond_xmit_activebackup(skb, dev);
4132
	case BOND_MODE_8023AD:
4133
	case BOND_MODE_XOR:
4134
		return bond_3ad_xor_xmit(skb, dev);
4135 4136 4137 4138
	case BOND_MODE_BROADCAST:
		return bond_xmit_broadcast(skb, dev);
	case BOND_MODE_ALB:
		return bond_alb_xmit(skb, dev);
4139 4140
	case BOND_MODE_TLB:
		return bond_tlb_xmit(skb, dev);
4141 4142
	default:
		/* Should never happen, mode already checked */
4143
		netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
4144
		WARN_ON_ONCE(1);
E
Eric Dumazet 已提交
4145
		bond_tx_drop(dev, skb);
4146 4147 4148 4149
		return NETDEV_TX_OK;
	}
}

4150 4151 4152 4153 4154
static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bonding *bond = netdev_priv(dev);
	netdev_tx_t ret = NETDEV_TX_OK;

4155
	/* If we risk deadlock from transmitting this in the
4156 4157
	 * netpoll path, tell netpoll to queue the frame for later tx
	 */
4158
	if (unlikely(is_netpoll_tx_blocked(dev)))
4159 4160
		return NETDEV_TX_BUSY;

4161
	rcu_read_lock();
4162
	if (bond_has_slaves(bond))
4163 4164
		ret = __bond_start_xmit(skb, dev);
	else
E
Eric Dumazet 已提交
4165
		bond_tx_drop(dev, skb);
4166
	rcu_read_unlock();
4167 4168 4169

	return ret;
}
4170

4171 4172
static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
					   struct ethtool_link_ksettings *cmd)
4173 4174 4175
{
	struct bonding *bond = netdev_priv(bond_dev);
	unsigned long speed = 0;
4176
	struct list_head *iter;
4177
	struct slave *slave;
4178

4179 4180
	cmd->base.duplex = DUPLEX_UNKNOWN;
	cmd->base.port = PORT_OTHER;
4181

4182
	/* Since bond_slave_can_tx returns false for all inactive or down slaves, we
4183 4184 4185 4186
	 * do not need to check mode.  Though link speed might not represent
	 * the true receive or transmit bandwidth (not all modes are symmetric)
	 * this is an accurate maximum.
	 */
4187
	bond_for_each_slave(bond, slave, iter) {
4188
		if (bond_slave_can_tx(slave)) {
4189 4190
			if (slave->speed != SPEED_UNKNOWN)
				speed += slave->speed;
4191
			if (cmd->base.duplex == DUPLEX_UNKNOWN &&
4192
			    slave->duplex != DUPLEX_UNKNOWN)
4193
				cmd->base.duplex = slave->duplex;
4194 4195
		}
	}
4196
	cmd->base.speed = speed ? : SPEED_UNKNOWN;
4197

4198 4199 4200
	return 0;
}

4201
static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
4202
				     struct ethtool_drvinfo *drvinfo)
4203
{
4204 4205 4206 4207
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
		 BOND_ABI_VERSION);
4208 4209
}

4210
static const struct ethtool_ops bond_ethtool_ops = {
4211
	.get_drvinfo		= bond_ethtool_get_drvinfo,
4212
	.get_link		= ethtool_op_get_link,
4213
	.get_link_ksettings	= bond_ethtool_get_link_ksettings,
4214 4215
};

4216
static const struct net_device_ops bond_netdev_ops = {
4217
	.ndo_init		= bond_init,
S
Stephen Hemminger 已提交
4218
	.ndo_uninit		= bond_uninit,
4219 4220
	.ndo_open		= bond_open,
	.ndo_stop		= bond_close,
4221
	.ndo_start_xmit		= bond_start_xmit,
4222
	.ndo_select_queue	= bond_select_queue,
4223
	.ndo_get_stats64	= bond_get_stats,
4224
	.ndo_do_ioctl		= bond_do_ioctl,
4225
	.ndo_change_rx_flags	= bond_change_rx_flags,
4226
	.ndo_set_rx_mode	= bond_set_rx_mode,
4227
	.ndo_change_mtu		= bond_change_mtu,
J
Jiri Pirko 已提交
4228
	.ndo_set_mac_address	= bond_set_mac_address,
4229
	.ndo_neigh_setup	= bond_neigh_setup,
J
Jiri Pirko 已提交
4230
	.ndo_vlan_rx_add_vid	= bond_vlan_rx_add_vid,
4231
	.ndo_vlan_rx_kill_vid	= bond_vlan_rx_kill_vid,
4232
	.ndo_get_lock_subclass  = bond_get_nest_level,
4233
#ifdef CONFIG_NET_POLL_CONTROLLER
4234
	.ndo_netpoll_setup	= bond_netpoll_setup,
4235 4236 4237
	.ndo_netpoll_cleanup	= bond_netpoll_cleanup,
	.ndo_poll_controller	= bond_poll_controller,
#endif
J
Jiri Pirko 已提交
4238 4239
	.ndo_add_slave		= bond_enslave,
	.ndo_del_slave		= bond_release,
4240
	.ndo_fix_features	= bond_fix_features,
4241
	.ndo_features_check	= passthru_features_check,
4242 4243
};

4244 4245 4246 4247
static const struct device_type bond_type = {
	.name = "bond",
};

4248 4249 4250 4251 4252 4253 4254
static void bond_destructor(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	if (bond->wq)
		destroy_workqueue(bond->wq);
}

4255
void bond_setup(struct net_device *bond_dev)
L
Linus Torvalds 已提交
4256
{
4257
	struct bonding *bond = netdev_priv(bond_dev);
L
Linus Torvalds 已提交
4258

4259
	spin_lock_init(&bond->mode_lock);
E
Eric Dumazet 已提交
4260
	spin_lock_init(&bond->stats_lock);
4261
	bond->params = bonding_defaults;
L
Linus Torvalds 已提交
4262 4263 4264 4265 4266

	/* Initialize pointers */
	bond->dev = bond_dev;

	/* Initialize the device entry points */
4267
	ether_setup(bond_dev);
W
WANG Cong 已提交
4268
	bond_dev->max_mtu = ETH_MAX_MTU;
4269
	bond_dev->netdev_ops = &bond_netdev_ops;
4270
	bond_dev->ethtool_ops = &bond_ethtool_ops;
L
Linus Torvalds 已提交
4271

4272 4273
	bond_dev->needs_free_netdev = true;
	bond_dev->priv_destructor = bond_destructor;
L
Linus Torvalds 已提交
4274

4275 4276
	SET_NETDEV_DEVTYPE(bond_dev, &bond_type);

L
Linus Torvalds 已提交
4277
	/* Initialize the device options */
4278
	bond_dev->flags |= IFF_MASTER;
4279
	bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
4280
	bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
4281

4282
	/* don't acquire bond device's netif_tx_lock when transmitting */
L
Linus Torvalds 已提交
4283 4284 4285 4286 4287 4288 4289 4290 4291
	bond_dev->features |= NETIF_F_LLTX;

	/* By default, we declare the bond to be fully
	 * VLAN hardware accelerated capable. Special
	 * care is taken in the various xmit functions
	 * when there are slaves that are not hw accel
	 * capable
	 */

4292 4293 4294
	/* Don't allow bond devices to change network namespaces. */
	bond_dev->features |= NETIF_F_NETNS_LOCAL;

4295
	bond_dev->hw_features = BOND_VLAN_FEATURES |
4296 4297 4298
				NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX |
				NETIF_F_HW_VLAN_CTAG_FILTER;
4299

4300
	bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
4301
	bond_dev->features |= bond_dev->hw_features;
L
Linus Torvalds 已提交
4302 4303
}

4304 4305 4306
/* Destroy a bonding device.
 * Must be under rtnl_lock when this function is called.
 */
4307
static void bond_uninit(struct net_device *bond_dev)
J
Jay Vosburgh 已提交
4308
{
4309
	struct bonding *bond = netdev_priv(bond_dev);
4310 4311
	struct list_head *iter;
	struct slave *slave;
4312
	struct bond_up_slave *arr;
J
Jay Vosburgh 已提交
4313

4314 4315
	bond_netpoll_cleanup(bond_dev);

4316
	/* Release the bonded slaves */
4317
	bond_for_each_slave(bond, slave, iter)
4318
		__bond_release_one(bond_dev, slave->dev, true, true);
4319
	netdev_info(bond_dev, "Released all slaves\n");
4320

4321 4322 4323 4324 4325 4326
	arr = rtnl_dereference(bond->slave_arr);
	if (arr) {
		RCU_INIT_POINTER(bond->slave_arr, NULL);
		kfree_rcu(arr, rcu);
	}

J
Jay Vosburgh 已提交
4327 4328
	list_del(&bond->bond_list);

4329
	bond_debug_unregister(bond);
J
Jay Vosburgh 已提交
4330 4331
}

L
Linus Torvalds 已提交
4332 4333 4334 4335
/*------------------------- Module initialization ---------------------------*/

static int bond_check_params(struct bond_params *params)
{
4336
	int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
4337 4338
	struct bond_opt_value newval;
	const struct bond_opt_value *valptr;
4339
	int arp_all_targets_value = 0;
4340
	u16 ad_actor_sys_prio = 0;
4341
	u16 ad_user_port_key = 0;
4342
	__be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 };
4343 4344 4345 4346
	int arp_ip_count;
	int bond_mode	= BOND_MODE_ROUNDROBIN;
	int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
	int lacp_fast = 0;
4347
	int tlb_dynamic_lb;
4348

4349
	/* Convert string parameters. */
L
Linus Torvalds 已提交
4350
	if (mode) {
4351 4352 4353 4354
		bond_opt_initstr(&newval, mode);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
		if (!valptr) {
			pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
L
Linus Torvalds 已提交
4355 4356
			return -EINVAL;
		}
4357
		bond_mode = valptr->value;
L
Linus Torvalds 已提交
4358 4359
	}

4360
	if (xmit_hash_policy) {
4361 4362 4363
		if (bond_mode == BOND_MODE_ROUNDROBIN ||
		    bond_mode == BOND_MODE_ACTIVEBACKUP ||
		    bond_mode == BOND_MODE_BROADCAST) {
J
Joe Perches 已提交
4364
			pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
J
Joe Perches 已提交
4365
				bond_mode_name(bond_mode));
4366
		} else {
4367 4368 4369 4370
			bond_opt_initstr(&newval, xmit_hash_policy);
			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
						&newval);
			if (!valptr) {
J
Joe Perches 已提交
4371
				pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
4372 4373 4374
				       xmit_hash_policy);
				return -EINVAL;
			}
4375
			xmit_hashtype = valptr->value;
4376 4377 4378
		}
	}

L
Linus Torvalds 已提交
4379 4380
	if (lacp_rate) {
		if (bond_mode != BOND_MODE_8023AD) {
J
Joe Perches 已提交
4381 4382
			pr_info("lacp_rate param is irrelevant in mode %s\n",
				bond_mode_name(bond_mode));
L
Linus Torvalds 已提交
4383
		} else {
4384 4385 4386 4387
			bond_opt_initstr(&newval, lacp_rate);
			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
						&newval);
			if (!valptr) {
J
Joe Perches 已提交
4388
				pr_err("Error: Invalid lacp rate \"%s\"\n",
4389
				       lacp_rate);
L
Linus Torvalds 已提交
4390 4391
				return -EINVAL;
			}
4392
			lacp_fast = valptr->value;
L
Linus Torvalds 已提交
4393 4394 4395
		}
	}

4396
	if (ad_select) {
4397
		bond_opt_initstr(&newval, ad_select);
4398 4399 4400 4401
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
					&newval);
		if (!valptr) {
			pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
4402 4403
			return -EINVAL;
		}
4404 4405
		params->ad_select = valptr->value;
		if (bond_mode != BOND_MODE_8023AD)
4406
			pr_warn("ad_select param only affects 802.3ad mode\n");
4407 4408 4409 4410
	} else {
		params->ad_select = BOND_AD_STABLE;
	}

4411
	if (max_bonds < 0) {
4412 4413
		pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
			max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
L
Linus Torvalds 已提交
4414 4415 4416 4417
		max_bonds = BOND_DEFAULT_MAX_BONDS;
	}

	if (miimon < 0) {
4418 4419
		pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			miimon, INT_MAX);
4420
		miimon = 0;
L
Linus Torvalds 已提交
4421 4422 4423
	}

	if (updelay < 0) {
4424 4425
		pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			updelay, INT_MAX);
L
Linus Torvalds 已提交
4426 4427 4428 4429
		updelay = 0;
	}

	if (downdelay < 0) {
4430 4431
		pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			downdelay, INT_MAX);
L
Linus Torvalds 已提交
4432 4433 4434
		downdelay = 0;
	}

4435 4436
	if ((use_carrier != 0) && (use_carrier != 1)) {
		pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
4437
			use_carrier);
L
Linus Torvalds 已提交
4438 4439 4440
		use_carrier = 1;
	}

4441
	if (num_peer_notif < 0 || num_peer_notif > 255) {
4442 4443
		pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
			num_peer_notif);
4444 4445 4446
		num_peer_notif = 1;
	}

4447
	/* reset values for 802.3ad/TLB/ALB */
4448
	if (!bond_mode_uses_arp(bond_mode)) {
L
Linus Torvalds 已提交
4449
		if (!miimon) {
4450 4451
			pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
			pr_warn("Forcing miimon to 100msec\n");
4452
			miimon = BOND_DEFAULT_MIIMON;
L
Linus Torvalds 已提交
4453 4454 4455
		}
	}

4456
	if (tx_queues < 1 || tx_queues > 255) {
4457 4458
		pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
			tx_queues, BOND_DEFAULT_TX_QUEUES);
4459 4460 4461
		tx_queues = BOND_DEFAULT_TX_QUEUES;
	}

4462
	if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
4463 4464
		pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
			all_slaves_active);
4465 4466 4467
		all_slaves_active = 0;
	}

4468
	if (resend_igmp < 0 || resend_igmp > 255) {
4469 4470
		pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
			resend_igmp, BOND_DEFAULT_RESEND_IGMP);
4471 4472 4473
		resend_igmp = BOND_DEFAULT_RESEND_IGMP;
	}

4474 4475
	bond_opt_initval(&newval, packets_per_slave);
	if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
4476 4477 4478 4479 4480
		pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
			packets_per_slave, USHRT_MAX);
		packets_per_slave = 1;
	}

L
Linus Torvalds 已提交
4481
	if (bond_mode == BOND_MODE_ALB) {
J
Joe Perches 已提交
4482 4483
		pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
			  updelay);
L
Linus Torvalds 已提交
4484 4485 4486 4487 4488 4489 4490
	}

	if (!miimon) {
		if (updelay || downdelay) {
			/* just warn the user the up/down delay will have
			 * no effect since miimon is zero...
			 */
4491 4492
			pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
				updelay, downdelay);
L
Linus Torvalds 已提交
4493 4494 4495 4496
		}
	} else {
		/* don't allow arp monitoring */
		if (arp_interval) {
4497 4498
			pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
				miimon, arp_interval);
L
Linus Torvalds 已提交
4499 4500 4501 4502
			arp_interval = 0;
		}

		if ((updelay % miimon) != 0) {
4503 4504
			pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
				updelay, miimon, (updelay / miimon) * miimon);
		}

		updelay /= miimon;
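		/* updelay is kept in units of miimon intervals from here on,
		 * e.g. updelay=250 with miimon=100 becomes 2 (i.e. 200 ms).
		 */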

		if ((downdelay % miimon) != 0) {
			pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
				downdelay, miimon,
				(downdelay / miimon) * miimon);
L
Linus Torvalds 已提交
4513 4514 4515 4516 4517 4518
		}

		downdelay /= miimon;
	}

	if (arp_interval < 0) {
4519 4520
		pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			arp_interval, INT_MAX);
4521
		arp_interval = 0;
L
Linus Torvalds 已提交
4522 4523
	}

4524 4525
	for (arp_ip_count = 0, i = 0;
	     (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
4526
		__be32 ip;
4527 4528

		/* not a complete check, but good enough to catch mistakes */
4529
		if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
4530
		    !bond_is_ip_target_ok(ip)) {
4531 4532
			pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
				arp_ip_target[i]);
L
Linus Torvalds 已提交
4533 4534
			arp_interval = 0;
		} else {
4535 4536 4537
			if (bond_get_targets_ip(arp_target, ip) == -1)
				arp_target[arp_ip_count++] = ip;
			else
4538 4539
				pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
					&ip);
L
Linus Torvalds 已提交
4540 4541 4542 4543 4544
		}
	}

	if (arp_interval && !arp_ip_count) {
		/* don't allow arping if no arp_ip_target given... */
4545 4546
		pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
			arp_interval);
L
Linus Torvalds 已提交
4547 4548 4549
		arp_interval = 0;
	}

4550 4551
	if (arp_validate) {
		if (!arp_interval) {
J
Joe Perches 已提交
4552
			pr_err("arp_validate requires arp_interval\n");
4553 4554 4555
			return -EINVAL;
		}

4556 4557 4558 4559
		bond_opt_initstr(&newval, arp_validate);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
					&newval);
		if (!valptr) {
J
Joe Perches 已提交
4560
			pr_err("Error: invalid arp_validate \"%s\"\n",
4561
			       arp_validate);
4562 4563
			return -EINVAL;
		}
4564 4565
		arp_validate_value = valptr->value;
	} else {
4566
		arp_validate_value = 0;
4567
	}
4568

4569
	if (arp_all_targets) {
4570 4571 4572 4573
		bond_opt_initstr(&newval, arp_all_targets);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
					&newval);
		if (!valptr) {
4574 4575 4576
			pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
			       arp_all_targets);
			arp_all_targets_value = 0;
4577 4578
		} else {
			arp_all_targets_value = valptr->value;
4579 4580 4581
		}
	}

L
Linus Torvalds 已提交
4582
	if (miimon) {
J
Joe Perches 已提交
4583
		pr_info("MII link monitoring set to %d ms\n", miimon);
L
Linus Torvalds 已提交
4584
	} else if (arp_interval) {
4585 4586
		valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
					  arp_validate_value);
J
Joe Perches 已提交
4587
		pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
4588
			arp_interval, valptr->string, arp_ip_count);
L
Linus Torvalds 已提交
4589 4590

		for (i = 0; i < arp_ip_count; i++)
J
Joe Perches 已提交
4591
			pr_cont(" %s", arp_ip_target[i]);
L
Linus Torvalds 已提交
4592

J
Joe Perches 已提交
4593
		pr_cont("\n");
L
Linus Torvalds 已提交
4594

4595
	} else if (max_bonds) {
L
Linus Torvalds 已提交
4596 4597 4598
		/* miimon and arp_interval not set, we need one so things
		 * work as expected, see bonding.txt for details
		 */
J
Joe Perches 已提交
4599
		pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
L
Linus Torvalds 已提交
4600 4601
	}

4602
	if (primary && !bond_mode_uses_primary(bond_mode)) {
L
Linus Torvalds 已提交
4603 4604 4605
		/* currently, using a primary only makes sense
		 * in active backup, TLB or ALB modes
		 */
4606 4607
		pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
			primary, bond_mode_name(bond_mode));
L
Linus Torvalds 已提交
4608 4609 4610
		primary = NULL;
	}

4611
	if (primary && primary_reselect) {
4612 4613 4614 4615
		bond_opt_initstr(&newval, primary_reselect);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
					&newval);
		if (!valptr) {
J
Joe Perches 已提交
4616
			pr_err("Error: Invalid primary_reselect \"%s\"\n",
4617
			       primary_reselect);
4618 4619
			return -EINVAL;
		}
4620
		primary_reselect_value = valptr->value;
4621 4622 4623 4624
	} else {
		primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
	}

4625
	if (fail_over_mac) {
4626 4627 4628 4629
		bond_opt_initstr(&newval, fail_over_mac);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
					&newval);
		if (!valptr) {
J
Joe Perches 已提交
4630
			pr_err("Error: invalid fail_over_mac \"%s\"\n",
4631
			       fail_over_mac);
4632 4633
			return -EINVAL;
		}
4634
		fail_over_mac_value = valptr->value;
4635
		if (bond_mode != BOND_MODE_ACTIVEBACKUP)
4636
			pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
4637 4638 4639
	} else {
		fail_over_mac_value = BOND_FOM_NONE;
	}
4640

4641 4642 4643 4644 4645 4646 4647 4648 4649 4650
	bond_opt_initstr(&newval, "default");
	valptr = bond_opt_parse(
			bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
				     &newval);
	if (!valptr) {
		pr_err("Error: No ad_actor_sys_prio default value");
		return -EINVAL;
	}
	ad_actor_sys_prio = valptr->value;

4651 4652 4653 4654 4655 4656 4657 4658
	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
				&newval);
	if (!valptr) {
		pr_err("Error: No ad_user_port_key default value");
		return -EINVAL;
	}
	ad_user_port_key = valptr->value;

4659 4660 4661 4662 4663
	bond_opt_initstr(&newval, "default");
	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval);
	if (!valptr) {
		pr_err("Error: No tlb_dynamic_lb default value");
		return -EINVAL;
4664
	}
4665
	tlb_dynamic_lb = valptr->value;
4666

4667
	if (lp_interval == 0) {
		pr_warn("Warning: lp_interval must be between 1 and %d, so it was reset to %d\n",
			INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
		lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
	}

L
Linus Torvalds 已提交
4673 4674
	/* fill params struct with the proper values */
	params->mode = bond_mode;
4675
	params->xmit_policy = xmit_hashtype;
L
Linus Torvalds 已提交
4676
	params->miimon = miimon;
4677
	params->num_peer_notif = num_peer_notif;
L
Linus Torvalds 已提交
4678
	params->arp_interval = arp_interval;
4679
	params->arp_validate = arp_validate_value;
4680
	params->arp_all_targets = arp_all_targets_value;
L
Linus Torvalds 已提交
4681 4682 4683 4684 4685
	params->updelay = updelay;
	params->downdelay = downdelay;
	params->use_carrier = use_carrier;
	params->lacp_fast = lacp_fast;
	params->primary[0] = 0;
4686
	params->primary_reselect = primary_reselect_value;
4687
	params->fail_over_mac = fail_over_mac_value;
4688
	params->tx_queues = tx_queues;
4689
	params->all_slaves_active = all_slaves_active;
4690
	params->resend_igmp = resend_igmp;
4691
	params->min_links = min_links;
4692
	params->lp_interval = lp_interval;
4693
	params->packets_per_slave = packets_per_slave;
4694
	params->tlb_dynamic_lb = tlb_dynamic_lb;
4695
	params->ad_actor_sys_prio = ad_actor_sys_prio;
4696
	eth_zero_addr(params->ad_actor_system);
4697
	params->ad_user_port_key = ad_user_port_key;
4698 4699 4700 4701 4702 4703 4704 4705 4706 4707 4708
	if (packets_per_slave > 0) {
		params->reciprocal_packets_per_slave =
			reciprocal_value(packets_per_slave);
	} else {
		/* reciprocal_packets_per_slave is unused if
		 * packets_per_slave is 0 or 1, just initialize it
		 */
		params->reciprocal_packets_per_slave =
			(struct reciprocal_value) { 0 };
	}

L
Linus Torvalds 已提交
4709 4710 4711 4712 4713 4714 4715 4716 4717 4718
	if (primary) {
		strncpy(params->primary, primary, IFNAMSIZ);
		params->primary[IFNAMSIZ - 1] = 0;
	}

	memcpy(params->arp_targets, arp_target, sizeof(arp_target));

	return 0;
}

4719
/* Called from registration process */
4720 4721 4722
static int bond_init(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
4723
	struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
4724

4725
	netdev_dbg(bond_dev, "Begin bond_init\n");
4726

4727
	bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
4728 4729 4730
	if (!bond->wq)
		return -ENOMEM;

4731
	bond->nest_level = SINGLE_DEPTH_NESTING;
4732
	netdev_lockdep_set_classes(bond_dev);
4733

4734
	list_add_tail(&bond->bond_list, &bn->dev_list);
4735

4736
	bond_prepare_sysfs_group(bond);
4737

4738 4739
	bond_debug_register(bond);

4740 4741
	/* Ensure valid dev_addr */
	if (is_zero_ether_addr(bond_dev->dev_addr) &&
4742
	    bond_dev->addr_assign_type == NET_ADDR_PERM)
4743 4744
		eth_hw_addr_random(bond_dev);

4745 4746 4747
	return 0;
}

4748
unsigned int bond_get_num_tx_queues(void)
4749
{
4750
	return tx_queues;
4751 4752
}

4753
/* Create a new bond based on the specified name and bonding parameters.
4754
 * If name is NULL, obtain a suitable "bond%d" name for us.
4755 4756 4757
 * Caller must NOT hold rtnl_lock; we need to release it here before we
 * set up our sysfs entries.
 */
4758
int bond_create(struct net *net, const char *name)
4759 4760
{
	struct net_device *bond_dev;
4761 4762
	struct bonding *bond;
	struct alb_bond_info *bond_info;
4763 4764 4765
	int res;

	rtnl_lock();
4766

4767
	bond_dev = alloc_netdev_mq(sizeof(struct bonding),
4768
				   name ? name : "bond%d", NET_NAME_UNKNOWN,
4769
				   bond_setup, tx_queues);
4770
	if (!bond_dev) {
J
Joe Perches 已提交
4771
		pr_err("%s: eek! can't alloc netdev!\n", name);
4772 4773
		rtnl_unlock();
		return -ENOMEM;
4774 4775
	}

4776 4777 4778 4779 4780 4781 4782 4783
	/*
	 * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX.
	 * It is set to 0 by default which is wrong.
	 */
	bond = netdev_priv(bond_dev);
	bond_info = &(BOND_ALB_INFO(bond));
	bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;

4784
	dev_net_set(bond_dev, net);
4785 4786
	bond_dev->rtnl_link_ops = &bond_link_ops;

4787
	res = register_netdevice(bond_dev);
4788

4789 4790
	netif_carrier_off(bond_dev);

4791 4792
	bond_work_init_all(bond);

4793
	rtnl_unlock();
4794
	if (res < 0)
4795
		free_netdev(bond_dev);
E
Eric W. Biederman 已提交
4796
	return res;
4797 4798
}

4799
static int __net_init bond_net_init(struct net *net)
4800
{
4801
	struct bond_net *bn = net_generic(net, bond_net_id);
4802 4803 4804 4805 4806

	bn->net = net;
	INIT_LIST_HEAD(&bn->dev_list);

	bond_create_proc_dir(bn);
4807
	bond_create_sysfs(bn);
4808

4809
	return 0;
4810 4811
}

4812
static void __net_exit bond_net_exit(struct net *net)
4813
{
4814
	struct bond_net *bn = net_generic(net, bond_net_id);
4815 4816
	struct bonding *bond, *tmp_bond;
	LIST_HEAD(list);
4817

4818
	bond_destroy_sysfs(bn);
4819 4820 4821 4822 4823 4824 4825

	/* Kill off any bonds created after unregistering bond rtnl ops */
	rtnl_lock();
	list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
		unregister_netdevice_queue(bond->dev, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
4826 4827

	bond_destroy_proc_dir(bn);
4828 4829 4830 4831 4832
}

static struct pernet_operations bond_net_ops = {
	.init = bond_net_init,
	.exit = bond_net_exit,
4833 4834
	.id   = &bond_net_id,
	.size = sizeof(struct bond_net),
4835 4836
};

L
Linus Torvalds 已提交
4837 4838 4839 4840 4841
static int __init bonding_init(void)
{
	int i;
	int res;

4842
	pr_info("%s", bond_version);
L
Linus Torvalds 已提交
4843

4844
	res = bond_check_params(&bonding_defaults);
S
Stephen Hemminger 已提交
4845
	if (res)
4846
		goto out;
L
Linus Torvalds 已提交
4847

4848
	res = register_pernet_subsys(&bond_net_ops);
4849 4850
	if (res)
		goto out;
4851

4852
	res = bond_netlink_init();
4853
	if (res)
4854
		goto err_link;
4855

4856 4857
	bond_create_debugfs();

L
Linus Torvalds 已提交
4858
	for (i = 0; i < max_bonds; i++) {
4859
		res = bond_create(&init_net, NULL);
4860 4861
		if (res)
			goto err;
L
Linus Torvalds 已提交
4862 4863 4864
	}

	register_netdevice_notifier(&bond_netdev_notifier);
4865
out:
L
Linus Torvalds 已提交
4866
	return res;
4867
err:
4868
	bond_destroy_debugfs();
4869
	bond_netlink_fini();
4870
err_link:
4871
	unregister_pernet_subsys(&bond_net_ops);
4872
	goto out;
4873

L
Linus Torvalds 已提交
4874 4875 4876 4877 4878 4879
}

static void __exit bonding_exit(void)
{
	unregister_netdevice_notifier(&bond_netdev_notifier);

4880
	bond_destroy_debugfs();
4881

4882
	bond_netlink_fini();
4883
	unregister_pernet_subsys(&bond_net_ops);
4884 4885

#ifdef CONFIG_NET_POLL_CONTROLLER
4886
	/* Make sure we don't have an imbalance on our netpoll blocking */
4887
	WARN_ON(atomic_read(&netpoll_block_tx));
4888
#endif
L
Linus Torvalds 已提交
4889 4890 4891 4892 4893 4894 4895 4896
}

module_init(bonding_init);
module_exit(bonding_exit);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");