/*
 * originally based on the dummy device.
 *
 * Copyright 1999, Thomas Davis, tadavis@lbl.gov.
 * Licensed under the GPL. Based on dummy.c, and eql.c devices.
 *
 * bonding.c: an Ethernet Bonding driver
 *
 * This is useful to talk to a Cisco EtherChannel compatible equipment:
 *	Cisco 5500
 *	Sun Trunking (Solaris)
 *	Alteon AceDirector Trunks
 *	Linux Bonding
 *	and probably many L2 switches ...
 *
 * How it works:
 *    ifconfig bond0 ipaddress netmask up
 *      will setup a network device, with an ip address.  No mac address
 *	will be assigned at this time.  The hw mac address will come from
 *	the first slave bonded to the channel.  All slaves will then use
 *	this hw mac address.
 *
 *    ifconfig bond0 down
 *         will release all slaves, marking them as down.
 *
 *    ifenslave bond0 eth0
 *	will attach eth0 to bond0 as a slave.  eth0 hw mac address will either
 *	a: be used as initial mac address
 *	b: if a hw mac address already is there, eth0's hw mac address
 *	   will then be set from bond0.
 *
 */
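
/* Illustrative only (not part of the original driver text): the ifconfig and
 * ifenslave steps above map roughly onto modern iproute2 tooling as:
 *
 *    ip link add bond0 type bond mode active-backup
 *    ip link set eth0 down
 *    ip link set eth0 master bond0
 *    ip link set bond0 up
 */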

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <net/ip.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/socket.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/smp.h>
#include <linux/if_ether.h>
#include <net/arp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_bonding.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>
#include <net/route.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_dissector.h>
#include <net/switchdev.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/bond_alb.h>

#include "bonding_priv.h"

/*---------------------------- Module parameters ----------------------------*/

/* monitor all links that often (in milliseconds). <=0 disables monitoring */

static int max_bonds	= BOND_DEFAULT_MAX_BONDS;
static int tx_queues	= BOND_DEFAULT_TX_QUEUES;
static int num_peer_notif = 1;
static int miimon;
static int updelay;
static int downdelay;
static int use_carrier	= 1;
static char *mode;
static char *primary;
static char *primary_reselect;
static char *lacp_rate;
static int min_links;
static char *ad_select;
static char *xmit_hash_policy;
static int arp_interval;
static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
static char *arp_validate;
static char *arp_all_targets;
static char *fail_over_mac;
static int all_slaves_active;
static struct bond_params bonding_defaults;
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
static int packets_per_slave = 1;
static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;

module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
module_param(tx_queues, int, 0);
MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
module_param_named(num_grat_arp, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
			       "failover event (alias of num_unsol_na)");
module_param_named(num_unsol_na, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
			       "failover event (alias of num_grat_arp)");
module_param(miimon, int, 0);
MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
module_param(updelay, int, 0);
MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
module_param(downdelay, int, 0);
MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
			    "in milliseconds");
module_param(use_carrier, int, 0);
MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
			      "0 for off, 1 for on (default)");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
		       "1 for active-backup, 2 for balance-xor, "
		       "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
		       "6 for balance-alb");
module_param(primary, charp, 0);
MODULE_PARM_DESC(primary, "Primary network device to use");
module_param(primary_reselect, charp, 0);
MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
				   "once it comes up; "
				   "0 for always (default), "
				   "1 for only if speed of primary is "
				   "better, "
				   "2 for only on active slave "
				   "failure");
module_param(lacp_rate, charp, 0);
MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
			    "0 for slow, 1 for fast");
module_param(ad_select, charp, 0);
MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
			    "0 for stable (default), 1 for bandwidth, "
			    "2 for count");
module_param(min_links, int, 0);
MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");

module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
				   "0 for layer 2 (default), 1 for layer 3+4, "
				   "2 for layer 2+3, 3 for encap layer 2+3, "
				   "4 for encap layer 3+4");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
module_param(arp_validate, charp, 0);
MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
			       "0 for none (default), 1 for active, "
			       "2 for backup, 3 for all");
module_param(arp_all_targets, charp, 0);
MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
module_param(fail_over_mac, charp, 0);
MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
				"the same MAC; 0 for none (default), "
				"1 for active, 2 for follow");
module_param(all_slaves_active, int, 0);
MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
				     "by setting active flag for all slaves; "
				     "0 for never (default), 1 for always.");
module_param(resend_igmp, int, 0);
MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
			      "link failure");
module_param(packets_per_slave, int, 0);
MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
				    "mode; 0 for a random slave, 1 packet per "
				    "slave (default), >1 packets per slave.");
module_param(lp_interval, uint, 0);
MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
			      "the bonding driver sends learning packets to "
			      "each slave's peer switch. The default is 1.");

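/* Illustrative only (not part of the original driver text): the parameters
 * above are normally passed at module load time, e.g.
 *
 *    modprobe bonding mode=802.3ad miimon=100 lacp_rate=fast max_bonds=1
 */
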
/*----------------------------- Global variables ----------------------------*/

#ifdef CONFIG_NET_POLL_CONTROLLER
atomic_t netpoll_block_tx = ATOMIC_INIT(0);
#endif

unsigned int bond_net_id __read_mostly;

/*-------------------------- Forward declarations ---------------------------*/

static int bond_init(struct net_device *bond_dev);
static void bond_uninit(struct net_device *bond_dev);
static void bond_get_stats(struct net_device *bond_dev,
			   struct rtnl_link_stats64 *stats);
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
				  int mod);

/*---------------------------- General routines -----------------------------*/

const char *bond_mode_name(int mode)
{
	static const char *names[] = {
		[BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
		[BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
		[BOND_MODE_XOR] = "load balancing (xor)",
		[BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
		[BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
		[BOND_MODE_TLB] = "transmit load balancing",
		[BOND_MODE_ALB] = "adaptive load balancing",
	};

	if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
		return "unknown";

	return names[mode];
}

/*---------------------------------- VLAN -----------------------------------*/

/**
 * bond_dev_queue_xmit - Prepare skb for xmit.
 *
 * @bond: bond device that got this skb for tx.
 * @skb: hw accel VLAN tagged skb to transmit
 * @slave_dev: slave that is supposed to xmit this skbuff
 */
void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
			struct net_device *slave_dev)
{
	skb->dev = slave_dev;

	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	if (unlikely(netpoll_tx_running(bond->dev)))
		bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
	else
		dev_queue_xmit(skb);
}

/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
 * We don't protect the slave list iteration with a lock because:
 * a. This operation is performed in IOCTL context,
 * b. The operation is protected by the RTNL semaphore in the 8021q code,
 * c. Holding a lock with BH disabled while directly calling a base driver
 *    entry point is generally a BAD idea.
 *
 * The design of synchronization/protection for this operation in the 8021q
 * module is good for one or more VLAN devices over a single physical device
 * and cannot be extended for a teaming solution like bonding, so there is a
 * potential race condition here where a net device from the vlan group might
 * be referenced (either by a base driver or the 8021q code) while it is being
 * removed from the system. However, it turns out we're not making matters
 * worse, and if it works for regular VLAN usage it will work here too.
*/

/**
 * bond_vlan_rx_add_vid - Propagates adding an id to slaves
 * @bond_dev: bonding net device that got called
 * @vid: vlan id being added
 */
static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
				__be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *rollback_slave;
	struct list_head *iter;
	int res;

	bond_for_each_slave(bond, slave, iter) {
		res = vlan_vid_add(slave->dev, proto, vid);
		if (res)
			goto unwind;
	}

	return 0;

unwind:
	/* unwind to the slave that failed */
	bond_for_each_slave(bond, rollback_slave, iter) {
		if (rollback_slave == slave)
			break;

		vlan_vid_del(rollback_slave->dev, proto, vid);
	}

	return res;
}

/**
 * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
 * @bond_dev: bonding net device that got called
 * @vid: vlan id being removed
 */
static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
				 __be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		vlan_vid_del(slave->dev, proto, vid);

	if (bond_is_lb(bond))
		bond_alb_clear_vlan(bond, vid);

	return 0;
}

/*------------------------------- Link status -------------------------------*/

/* Set the carrier state for the master according to the state of its
 * slaves.  If any slaves are up, the master is up.  In 802.3ad mode,
 * do special 802.3ad magic.
 *
 * Returns zero if carrier state does not change, nonzero if it does.
 */
int bond_set_carrier(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave;

	if (!bond_has_slaves(bond))
		goto down;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		return bond_3ad_set_carrier(bond);

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP) {
			if (!netif_carrier_ok(bond->dev)) {
				netif_carrier_on(bond->dev);
				return 1;
			}
			return 0;
		}
	}

down:
	if (netif_carrier_ok(bond->dev)) {
		netif_carrier_off(bond->dev);
		return 1;
	}
	return 0;
}

/* Get link speed and duplex from the slave's base driver
 * using ethtool. If for some reason the call fails or the
 * values are invalid, set speed and duplex to -1,
 * and return. Return 1 if speed or duplex settings are
 * UNKNOWN; 0 otherwise.
 */
static int bond_update_speed_duplex(struct slave *slave)
{
	struct net_device *slave_dev = slave->dev;
	struct ethtool_link_ksettings ecmd;
	int res;

	slave->speed = SPEED_UNKNOWN;
	slave->duplex = DUPLEX_UNKNOWN;

	res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
	if (res < 0)
		return 1;
	if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
		return 1;
	switch (ecmd.base.duplex) {
	case DUPLEX_FULL:
	case DUPLEX_HALF:
		break;
	default:
		return 1;
	}

	slave->speed = ecmd.base.speed;
	slave->duplex = ecmd.base.duplex;

	return 0;
}

const char *bond_slave_link_status(s8 link)
{
	switch (link) {
	case BOND_LINK_UP:
		return "up";
	case BOND_LINK_FAIL:
		return "going down";
	case BOND_LINK_DOWN:
		return "down";
	case BOND_LINK_BACK:
		return "going back";
	default:
		return "unknown";
	}
}

/* if <dev> supports MII link status reporting, check its link status.
 *
 * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
 * depending upon the setting of the use_carrier parameter.
 *
 * Return either BMSR_LSTATUS, meaning that the link is up (or we
 * can't tell and just pretend it is), or 0, meaning that the link is
 * down.
 *
 * If reporting is non-zero, instead of faking link up, return -1 if
 * both ETHTOOL and MII ioctls fail (meaning the device does not
 * support them).  If use_carrier is set, return whatever it says.
 * It'd be nice if there was a good way to tell if a driver supports
 * netif_carrier, but there really isn't.
 */
static int bond_check_dev_link(struct bonding *bond,
			       struct net_device *slave_dev, int reporting)
{
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	int (*ioctl)(struct net_device *, struct ifreq *, int);
	struct ifreq ifr;
	struct mii_ioctl_data *mii;

	if (!reporting && !netif_running(slave_dev))
		return 0;

	if (bond->params.use_carrier)
		return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;

	/* Try to get link status using Ethtool first. */
	if (slave_dev->ethtool_ops->get_link)
		return slave_dev->ethtool_ops->get_link(slave_dev) ?
			BMSR_LSTATUS : 0;

	/* Ethtool can't be used, fallback to MII ioctls. */
	ioctl = slave_ops->ndo_do_ioctl;
	if (ioctl) {
		/* TODO: set pointer to correct ioctl on a per team member
		 *       bases to make this more efficient. that is, once
		 *       we determine the correct ioctl, we will always
		 *       call it and not the others for that team
		 *       member.
		 */

		/* We cannot assume that SIOCGMIIPHY will also read a
		 * register; not all network drivers (e.g., e100)
		 * support that.
		 */

		/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
		strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
		mii = if_mii(&ifr);
		if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
			mii->reg_num = MII_BMSR;
			if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
				return mii->val_out & BMSR_LSTATUS;
		}
	}

	/* If reporting, report that either there's no dev->do_ioctl,
	 * or both SIOCGMIIREG and get_link failed (meaning that we
	 * cannot report link status).  If not reporting, pretend
	 * we're ok.
	 */
	return reporting ? -1 : BMSR_LSTATUS;
}

/*----------------------------- Multicast list ------------------------------*/

/* Push the promiscuity flag down to appropriate slaves */
static int bond_set_promiscuity(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_promiscuity(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_promiscuity(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}

/* Push the allmulti flag down to all slaves */
static int bond_set_allmulti(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_allmulti(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_allmulti(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}

/* Retrieve the list of registered multicast addresses for the bonding
 * device and retransmit an IGMP JOIN request to the current active
 * slave.
 */
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mcast_work.work);

	if (!rtnl_trylock()) {
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);

	if (bond->igmp_retrans > 1) {
		bond->igmp_retrans--;
		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
	}
	rtnl_unlock();
}

/* Flush bond's hardware addresses from slave */
static void bond_hw_addr_flush(struct net_device *bond_dev,
			       struct net_device *slave_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	dev_uc_unsync(slave_dev, bond_dev);
	dev_mc_unsync(slave_dev, bond_dev);

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		/* del lacpdu mc addr from mc list */
		u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;

		dev_mc_del(slave_dev, lacpdu_multicast);
	}
}

/*--------------------------- Active slave change ---------------------------*/

/* Update the hardware address list and promisc/allmulti for the new and
 * old active slaves (if any).  Modes that are not using primary keep all
 * slaves up to date at all times; only the modes that use primary need to call
 * this function to swap these settings during a failover.
 */
static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
			      struct slave *old_active)
{
	if (old_active) {
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(old_active->dev, -1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(old_active->dev, -1);

		bond_hw_addr_flush(bond->dev, old_active->dev);
	}

	if (new_active) {
		/* FIXME: Signal errors upstream. */
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(new_active->dev, 1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(new_active->dev, 1);

		netif_addr_lock_bh(bond->dev);
		dev_uc_sync(new_active->dev, bond->dev);
		dev_mc_sync(new_active->dev, bond->dev);
		netif_addr_unlock_bh(bond->dev);
	}
}

/**
 * bond_set_dev_addr - clone slave's address to bond
 * @bond_dev: bond net device
 * @slave_dev: slave net device
 *
 * Should be called with RTNL held.
 */
static void bond_set_dev_addr(struct net_device *bond_dev,
			      struct net_device *slave_dev)
{
	netdev_dbg(bond_dev, "bond_dev=%p slave_dev=%p slave_dev->name=%s slave_dev->addr_len=%d\n",
		   bond_dev, slave_dev, slave_dev->name, slave_dev->addr_len);
	memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
	bond_dev->addr_assign_type = NET_ADDR_STOLEN;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
}

static struct slave *bond_get_old_active(struct bonding *bond,
					 struct slave *new_active)
{
	struct slave *slave;
	struct list_head *iter;

	bond_for_each_slave(bond, slave, iter) {
		if (slave == new_active)
			continue;

		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
			return slave;
	}

	return NULL;
}

/* bond_do_fail_over_mac
 *
 * Perform special MAC address swapping for fail_over_mac settings
 *
 * Called with RTNL
 */
static void bond_do_fail_over_mac(struct bonding *bond,
				  struct slave *new_active,
				  struct slave *old_active)
{
	u8 tmp_mac[MAX_ADDR_LEN];
	struct sockaddr_storage ss;
	int rv;

	switch (bond->params.fail_over_mac) {
	case BOND_FOM_ACTIVE:
		if (new_active)
			bond_set_dev_addr(bond->dev, new_active->dev);
		break;
	case BOND_FOM_FOLLOW:
		/* if new_active && old_active, swap them
		 * if just old_active, do nothing (going to no active slave)
		 * if just new_active, set new_active to bond's MAC
		 */
		if (!new_active)
			return;

		if (!old_active)
			old_active = bond_get_old_active(bond, new_active);

		if (old_active) {
			bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr,
					  new_active->dev->addr_len);
			bond_hw_addr_copy(ss.__data,
					  old_active->dev->dev_addr,
					  old_active->dev->addr_len);
			ss.ss_family = new_active->dev->type;
		} else {
			bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
					  bond->dev->addr_len);
			ss.ss_family = bond->dev->type;
		}

		rv = dev_set_mac_address(new_active->dev,
					 (struct sockaddr *)&ss);
		if (rv) {
			netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
				   -rv, new_active->dev->name);
			goto out;
		}

		if (!old_active)
			goto out;

		bond_hw_addr_copy(ss.__data, tmp_mac,
				  new_active->dev->addr_len);
		ss.ss_family = old_active->dev->type;

		rv = dev_set_mac_address(old_active->dev,
					 (struct sockaddr *)&ss);
		if (rv)
			netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
				   -rv, new_active->dev->name);
out:
		break;
	default:
		netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
			   bond->params.fail_over_mac);
		break;
	}

}

static struct slave *bond_choose_primary_or_current(struct bonding *bond)
{
	struct slave *prim = rtnl_dereference(bond->primary_slave);
	struct slave *curr = rtnl_dereference(bond->curr_active_slave);

	if (!prim || prim->link != BOND_LINK_UP) {
		if (!curr || curr->link != BOND_LINK_UP)
			return NULL;
		return curr;
	}

	if (bond->force_primary) {
		bond->force_primary = false;
		return prim;
	}

	if (!curr || curr->link != BOND_LINK_UP)
		return prim;

	/* At this point, prim and curr are both up */
	switch (bond->params.primary_reselect) {
	case BOND_PRI_RESELECT_ALWAYS:
		return prim;
	case BOND_PRI_RESELECT_BETTER:
		if (prim->speed < curr->speed)
			return curr;
		if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
			return curr;
		return prim;
	case BOND_PRI_RESELECT_FAILURE:
		return curr;
	default:
		netdev_err(bond->dev, "impossible primary_reselect %d\n",
			   bond->params.primary_reselect);
		return curr;
	}
}

/**
 * bond_find_best_slave - select the best available slave to be the active one
 * @bond: our bonding struct
 */
static struct slave *bond_find_best_slave(struct bonding *bond)
{
	struct slave *slave, *bestslave = NULL;
	struct list_head *iter;
	int mintime = bond->params.updelay;

	slave = bond_choose_primary_or_current(bond);
	if (slave)
		return slave;

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP)
			return slave;
		if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
		    slave->delay < mintime) {
			mintime = slave->delay;
			bestslave = slave;
		}
	}

	return bestslave;
}

static bool bond_should_notify_peers(struct bonding *bond)
{
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	rcu_read_unlock();

	netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
		   slave ? slave->dev->name : "NULL");

	if (!slave || !bond->send_peer_notif ||
	    !netif_carrier_ok(bond->dev) ||
	    test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
		return false;

	return true;
}

/**
 * change_active_interface - change the active slave into the specified one
 * @bond: our bonding struct
 * @new: the new slave to make the active one
 *
 * Set the new slave to the bond's settings and unset them on the old
 * curr_active_slave.
 * Setting include flags, mc-list, promiscuity, allmulti, etc.
 *
 * If @new's link state is %BOND_LINK_BACK we'll set it to %BOND_LINK_UP,
 * because it is apparently the best available slave we have, even though its
 * updelay hasn't timed out yet.
 *
 * Caller must hold RTNL.
 */
void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
{
	struct slave *old_active;

	ASSERT_RTNL();

	old_active = rtnl_dereference(bond->curr_active_slave);

	if (old_active == new_active)
		return;

	if (new_active) {
		new_active->last_link_up = jiffies;

		if (new_active->link == BOND_LINK_BACK) {
			if (bond_uses_primary(bond)) {
				netdev_info(bond->dev, "making interface %s the new active one %d ms earlier\n",
					    new_active->dev->name,
					    (bond->params.updelay - new_active->delay) * bond->params.miimon);
			}

			new_active->delay = 0;
			bond_set_slave_link_state(new_active, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);

			if (BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_3ad_handle_link_change(new_active, BOND_LINK_UP);

			if (bond_is_lb(bond))
				bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
		} else {
			if (bond_uses_primary(bond)) {
				netdev_info(bond->dev, "making interface %s the new active one\n",
					    new_active->dev->name);
			}
		}
	}

	if (bond_uses_primary(bond))
		bond_hw_addr_swap(bond, new_active, old_active);

	if (bond_is_lb(bond)) {
		bond_alb_handle_active_change(bond, new_active);
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);
		if (new_active)
			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);
	} else {
		rcu_assign_pointer(bond->curr_active_slave, new_active);
	}

	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);

		if (new_active) {
			bool should_notify_peers = false;

			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);

			if (bond->params.fail_over_mac)
				bond_do_fail_over_mac(bond, new_active,
						      old_active);

			if (netif_running(bond->dev)) {
				bond->send_peer_notif =
					bond->params.num_peer_notif;
				should_notify_peers =
					bond_should_notify_peers(bond);
			}

			call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
			if (should_notify_peers)
				call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
							 bond->dev);
		}
	}

	/* resend IGMP joins since active slave has changed or
	 * all were sent on curr_active_slave.
	 * resend only if bond is brought up with the affected
	 * bonding modes and the retransmission is enabled
	 */
	if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
	    ((bond_uses_primary(bond) && new_active) ||
	     BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
		bond->igmp_retrans = bond->params.resend_igmp;
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
	}
}

/**
 * bond_select_active_slave - select a new active slave, if needed
 * @bond: our bonding struct
 *
 * This function should be called when one of the following occurs:
 * - The old curr_active_slave has been released or lost its link.
 * - The primary_slave has got its link back.
 * - A slave has got its link back and there's no old curr_active_slave.
 *
 * Caller must hold RTNL.
 */
void bond_select_active_slave(struct bonding *bond)
{
	struct slave *best_slave;
	int rv;

	ASSERT_RTNL();

	best_slave = bond_find_best_slave(bond);
	if (best_slave != rtnl_dereference(bond->curr_active_slave)) {
		bond_change_active_slave(bond, best_slave);
		rv = bond_set_carrier(bond);
		if (!rv)
			return;

		if (netif_carrier_ok(bond->dev))
			netdev_info(bond->dev, "first active interface up!\n");
		else
			netdev_info(bond->dev, "now running without any active interface!\n");
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static inline int slave_enable_netpoll(struct slave *slave)
{
	struct netpoll *np;
	int err = 0;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	err = -ENOMEM;
	if (!np)
		goto out;

	err = __netpoll_setup(np, slave->dev);
	if (err) {
		kfree(np);
		goto out;
	}
	slave->np = np;
out:
	return err;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
	struct netpoll *np = slave->np;

	if (!np)
		return;

	slave->np = NULL;
	__netpoll_free_async(np);
}

static void bond_poll_controller(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave = NULL;
	struct list_head *iter;
	struct ad_info ad_info;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		if (bond_3ad_get_active_agg_info(bond, &ad_info))
			return;

	bond_for_each_slave_rcu(bond, slave, iter) {
		if (!bond_slave_is_up(slave))
			continue;

		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			struct aggregator *agg =
			    SLAVE_AD_INFO(slave)->port.aggregator;

			if (agg &&
			    agg->aggregator_identifier != ad_info.aggregator_id)
				continue;
		}

		netpoll_poll_dev(slave->dev);
	}
}

static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		if (bond_slave_is_up(slave))
			slave_disable_netpoll(slave);
}

static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	struct slave *slave;
	int err = 0;

	bond_for_each_slave(bond, slave, iter) {
		err = slave_enable_netpoll(slave);
		if (err) {
			bond_netpoll_cleanup(dev);
			break;
		}
	}
	return err;
}
#else
static inline int slave_enable_netpoll(struct slave *slave)
{
	return 0;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
}
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
}
#endif

/*---------------------------------- IOCTL ----------------------------------*/

static netdev_features_t bond_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	netdev_features_t mask;
	struct slave *slave;

	mask = features;

	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		features = netdev_increment_features(features,
						     slave->dev->features,
						     mask);
	}
	features = netdev_add_tso_features(features, mask);

	return features;
}

#define BOND_VLAN_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
				 NETIF_F_HIGHDMA | NETIF_F_LRO)

#define BOND_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)

static void bond_compute_features(struct bonding *bond)
{
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;
	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
	netdev_features_t enc_features  = BOND_ENC_FEATURES;
	struct net_device *bond_dev = bond->dev;
	struct list_head *iter;
	struct slave *slave;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int gso_max_size = GSO_MAX_SIZE;
	u16 gso_max_segs = GSO_MAX_SEGS;

	if (!bond_has_slaves(bond))
		goto done;
	vlan_features &= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		vlan_features = netdev_increment_features(vlan_features,
			slave->dev->vlan_features, BOND_VLAN_FEATURES);

		enc_features = netdev_increment_features(enc_features,
							 slave->dev->hw_enc_features,
							 BOND_ENC_FEATURES);
		dst_release_flag &= slave->dev->priv_flags;
		if (slave->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = slave->dev->hard_header_len;

		gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
		gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
	}
	bond_dev->hard_header_len = max_hard_header_len;

done:
	bond_dev->vlan_features = vlan_features;
	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
				    NETIF_F_GSO_UDP_L4;
	bond_dev->gso_max_segs = gso_max_segs;
	netif_set_gso_max_size(bond_dev, gso_max_size);

	bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
	    dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;

	netdev_change_features(bond_dev);
}

1116 1117 1118
static void bond_setup_by_slave(struct net_device *bond_dev,
				struct net_device *slave_dev)
{
	bond_dev->header_ops	    = slave_dev->header_ops;

	bond_dev->type		    = slave_dev->type;
	bond_dev->hard_header_len   = slave_dev->hard_header_len;
	bond_dev->addr_len	    = slave_dev->addr_len;

	memcpy(bond_dev->broadcast, slave_dev->broadcast,
		slave_dev->addr_len);
}

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for alb non-mcast/bcast.
 */
static bool bond_should_deliver_exact_match(struct sk_buff *skb,
					    struct slave *slave,
					    struct bonding *bond)
{
	if (bond_is_slave_inactive(slave)) {
		if (BOND_MODE(bond) == BOND_MODE_ALB &&
		    skb->pkt_type != PACKET_BROADCAST &&
		    skb->pkt_type != PACKET_MULTICAST)
			return false;
		return true;
	}
	return false;
}

static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct slave *slave;
	struct bonding *bond;
	int (*recv_probe)(const struct sk_buff *, struct bonding *,
			  struct slave *);
	int ret = RX_HANDLER_ANOTHER;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	slave = bond_slave_get_rcu(skb->dev);
	bond = slave->bond;

	recv_probe = READ_ONCE(bond->recv_probe);
	if (recv_probe) {
		ret = recv_probe(skb, bond, slave);
		if (ret == RX_HANDLER_CONSUMED) {
			consume_skb(skb);
			return ret;
		}
	}

	/* Link-local multicast packets should be passed to the
	 * stack on the link they arrive as well as pass them to the
	 * bond-master device. These packets are mostly usable when
	 * stack receives it with the link on which they arrive
	 * (e.g. LLDP) they also must be available on master. Some of
	 * the use cases include (but are not limited to): LLDP agents
	 * that must be able to operate both on enslaved interfaces as
	 * well as on bonds themselves; linux bridges that must be able
	 * to process/pass BPDUs from attached bonds when any kind of
	 * STP version is enabled on the network.
	 */
	if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

		if (nskb) {
			nskb->dev = bond->dev;
			netif_rx(nskb);
		}
		return RX_HANDLER_PASS;
	}
	if (bond_should_deliver_exact_match(skb, slave, bond))
		return RX_HANDLER_EXACT;

	skb->dev = bond->dev;

	if (BOND_MODE(bond) == BOND_MODE_ALB &&
	    bond->dev->priv_flags & IFF_BRIDGE_PORT &&
	    skb->pkt_type == PACKET_HOST) {

		if (unlikely(skb_cow_head(skb,
					  skb->data - skb_mac_header(skb)))) {
			kfree_skb(skb);
			return RX_HANDLER_CONSUMED;
		}
		bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
				  bond->dev->addr_len);
	}

	return ret;
}

static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
		return NETDEV_LAG_TX_TYPE_ROUNDROBIN;
	case BOND_MODE_ACTIVEBACKUP:
		return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP;
	case BOND_MODE_BROADCAST:
		return NETDEV_LAG_TX_TYPE_BROADCAST;
	case BOND_MODE_XOR:
	case BOND_MODE_8023AD:
		return NETDEV_LAG_TX_TYPE_HASH;
	default:
		return NETDEV_LAG_TX_TYPE_UNKNOWN;
	}
}

static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
					       enum netdev_lag_tx_type type)
{
	if (type != NETDEV_LAG_TX_TYPE_HASH)
		return NETDEV_LAG_HASH_NONE;

	switch (bond->params.xmit_policy) {
	case BOND_XMIT_POLICY_LAYER2:
		return NETDEV_LAG_HASH_L2;
	case BOND_XMIT_POLICY_LAYER34:
		return NETDEV_LAG_HASH_L34;
	case BOND_XMIT_POLICY_LAYER23:
		return NETDEV_LAG_HASH_L23;
	case BOND_XMIT_POLICY_ENCAP23:
		return NETDEV_LAG_HASH_E23;
	case BOND_XMIT_POLICY_ENCAP34:
		return NETDEV_LAG_HASH_E34;
	default:
		return NETDEV_LAG_HASH_UNKNOWN;
	}
}

static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
				      struct netlink_ext_ack *extack)
{
	struct netdev_lag_upper_info lag_upper_info;
	enum netdev_lag_tx_type type;

	type = bond_lag_tx_type(bond);
	lag_upper_info.tx_type = type;
	lag_upper_info.hash_type = bond_lag_hash_type(bond, type);

	return netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
					    &lag_upper_info, extack);
}

static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
{
	netdev_upper_dev_unlink(slave->dev, bond->dev);
	slave->dev->flags &= ~IFF_SLAVE;
}

static struct slave *bond_alloc_slave(struct bonding *bond)
{
	struct slave *slave = NULL;

	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	if (!slave)
		return NULL;

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
					       GFP_KERNEL);
		if (!SLAVE_AD_INFO(slave)) {
			kfree(slave);
			return NULL;
		}
	}
	return slave;
}

static void bond_free_slave(struct slave *slave)
{
	struct bonding *bond = bond_get_bond_by_slave(slave);

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		kfree(SLAVE_AD_INFO(slave));

	kfree(slave);
}

static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
{
	info->bond_mode = BOND_MODE(bond);
	info->miimon = bond->params.miimon;
	info->num_slaves = bond->slave_cnt;
}

static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
{
	strcpy(info->slave_name, slave->dev->name);
	info->link = slave->link;
	info->state = bond_slave_state(slave);
	info->link_failure_count = slave->link_failure_count;
}

static void bond_netdev_notify(struct net_device *dev,
			       struct netdev_bonding_info *info)
{
	rtnl_lock();
	netdev_bonding_info_change(dev, info);
	rtnl_unlock();
}

static void bond_netdev_notify_work(struct work_struct *_work)
{
	struct netdev_notify_work *w =
		container_of(_work, struct netdev_notify_work, work.work);

	bond_netdev_notify(w->dev, &w->bonding_info);
	dev_put(w->dev);
	kfree(w);
}

void bond_queue_slave_event(struct slave *slave)
{
	struct bonding *bond = slave->bond;
	struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);

	if (!nnw)
		return;

	dev_hold(slave->dev);
	nnw->dev = slave->dev;
	bond_fill_ifslave(slave, &nnw->bonding_info.slave);
	bond_fill_ifbond(bond, &nnw->bonding_info.master);
	INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);

	queue_delayed_work(slave->bond->wq, &nnw->work, 0);
}

void bond_lower_state_changed(struct slave *slave)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = slave->link == BOND_LINK_UP ||
		       slave->link == BOND_LINK_FAIL;
	info.tx_enabled = bond_is_active_slave(slave);
	netdev_lower_state_changed(slave->dev, &info);
}

/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
		 struct netlink_ext_ack *extack)
{
	struct bonding *bond = netdev_priv(bond_dev);
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	struct slave *new_slave = NULL, *prev_slave;
	struct sockaddr_storage ss;
	int link_reporting;
	int res = 0, i;

	if (!bond->params.use_carrier &&
	    slave_dev->ethtool_ops->get_link == NULL &&
	    slave_ops->ndo_do_ioctl == NULL) {
		netdev_warn(bond_dev, "no link monitoring support for %s\n",
			    slave_dev->name);
	}

	/* already in-use? */
	if (netdev_is_rx_handler_busy(slave_dev)) {
		NL_SET_ERR_MSG(extack, "Device is in use and cannot be enslaved");
		netdev_err(bond_dev,
			   "Error: Device is in use and cannot be enslaved\n");
		return -EBUSY;
	}

	if (bond_dev == slave_dev) {
		NL_SET_ERR_MSG(extack, "Cannot enslave bond to itself.");
		netdev_err(bond_dev, "cannot enslave bond to itself.\n");
		return -EPERM;
	}

	/* vlan challenged mutual exclusion */
	/* no need to lock since we're protected by rtnl_lock */
	if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
		netdev_dbg(bond_dev, "%s is NETIF_F_VLAN_CHALLENGED\n",
			   slave_dev->name);
		if (vlan_uses_dev(bond_dev)) {
			NL_SET_ERR_MSG(extack, "Can not enslave VLAN challenged device to VLAN enabled bond");
			netdev_err(bond_dev, "Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
				   slave_dev->name, bond_dev->name);
			return -EPERM;
		} else {
			netdev_warn(bond_dev, "enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
				    slave_dev->name, slave_dev->name,
				    bond_dev->name);
		}
	} else {
		netdev_dbg(bond_dev, "%s is !NETIF_F_VLAN_CHALLENGED\n",
			   slave_dev->name);
	}

	/* Old ifenslave binaries are no longer supported.  These can
	 * be identified with moderate accuracy by the state of the slave:
	 * the current ifenslave will set the interface down prior to
	 * enslaving it; the old ifenslave will not.
	 */
	if (slave_dev->flags & IFF_UP) {
		NL_SET_ERR_MSG(extack, "Device can not be enslaved while up");
		netdev_err(bond_dev, "%s is up - this may be due to an out of date ifenslave\n",
			   slave_dev->name);
		return -EPERM;
	}

	/* set bonding device ether type by slave - bonding netdevices are
	 * created with ether_setup, so when the slave type is not ARPHRD_ETHER
	 * there is a need to override some of the type dependent attribs/funcs.
	 *
	 * bond ether type mutual exclusion - don't allow slaves of dissimilar
	 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
	 */
	if (!bond_has_slaves(bond)) {
		if (bond_dev->type != slave_dev->type) {
			netdev_dbg(bond_dev, "change device type from %d to %d\n",
				   bond_dev->type, slave_dev->type);

			res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
						       bond_dev);
			res = notifier_to_errno(res);
			if (res) {
				netdev_err(bond_dev, "refused to change device type\n");
				return -EBUSY;
			}

			/* Flush unicast and multicast addresses */
			dev_uc_flush(bond_dev);
			dev_mc_flush(bond_dev);

			if (slave_dev->type != ARPHRD_ETHER)
				bond_setup_by_slave(bond_dev, slave_dev);
			else {
				ether_setup(bond_dev);
				bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
			}

			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
						 bond_dev);
		}
	} else if (bond_dev->type != slave_dev->type) {
		NL_SET_ERR_MSG(extack, "Device type is different from other slaves");
		netdev_err(bond_dev, "%s ether type (%d) is different from other slaves (%d), can not enslave it\n",
			   slave_dev->name, slave_dev->type, bond_dev->type);
		return -EINVAL;
	}

	if (slave_dev->type == ARPHRD_INFINIBAND &&
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		NL_SET_ERR_MSG(extack, "Only active-backup mode is supported for infiniband slaves");
		netdev_warn(bond_dev, "Type (%d) supports only active-backup mode\n",
			    slave_dev->type);
		res = -EOPNOTSUPP;
		goto err_undo_flags;
	}

	if (!slave_ops->ndo_set_mac_address ||
	    slave_dev->type == ARPHRD_INFINIBAND) {
		netdev_warn(bond_dev, "The slave device specified does not support setting the MAC address\n");
		if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
		    bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
			if (!bond_has_slaves(bond)) {
				bond->params.fail_over_mac = BOND_FOM_ACTIVE;
				netdev_warn(bond_dev, "Setting fail_over_mac to active for active-backup mode\n");
			} else {
				NL_SET_ERR_MSG(extack, "Slave device does not support setting the MAC address, but fail_over_mac is not set to active");
				netdev_err(bond_dev, "The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n");
				res = -EOPNOTSUPP;
				goto err_undo_flags;
			}
		}
	}

	call_netdevice_notifiers(NETDEV_JOIN, slave_dev);

	/* If this is the first slave, then we need to set the master's hardware
	 * address to be the same as the slave's.
	 */
	if (!bond_has_slaves(bond) &&
	    bond->dev->addr_assign_type == NET_ADDR_RANDOM)
		bond_set_dev_addr(bond->dev, slave_dev);

	new_slave = bond_alloc_slave(bond);
	if (!new_slave) {
		res = -ENOMEM;
		goto err_undo_flags;
	}

	new_slave->bond = bond;
	new_slave->dev = slave_dev;
	/* Set the new_slave's queue_id to be zero.  Queue ID mapping
	 * is set via sysfs or module option if desired.
	 */
	new_slave->queue_id = 0;

	/* Save slave's original mtu and then set it to match the bond */
	new_slave->original_mtu = slave_dev->mtu;
	res = dev_set_mtu(slave_dev, bond->dev->mtu);
	if (res) {
		netdev_dbg(bond_dev, "Error %d calling dev_set_mtu\n", res);
		goto err_free;
	}

	/* Save slave's original ("permanent") mac address for modes
	 * that need it, and for restoring it upon release, and then
	 * set it to the master's address
	 */
	bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr,
			  slave_dev->addr_len);

	if (!bond->params.fail_over_mac ||
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		/* Set slave to master's mac address.  The application already
		 * set the master's mac address to that of the first slave
		 */
		memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
		ss.ss_family = slave_dev->type;
		res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
		if (res) {
			netdev_dbg(bond_dev, "Error %d calling set_mac_address\n", res);
			goto err_restore_mtu;
		}
	}

	/* set slave flag before open to prevent IPv6 addrconf */
	slave_dev->flags |= IFF_SLAVE;

	/* open the slave since the application closed it */
	res = dev_open(slave_dev);
	if (res) {
		netdev_dbg(bond_dev, "Opening slave %s failed\n", slave_dev->name);
		goto err_restore_mac;
	}

	slave_dev->priv_flags |= IFF_BONDING;
	/* initialize slave stats */
	dev_get_stats(new_slave->dev, &new_slave->slave_stats);

	if (bond_is_lb(bond)) {
		/* bond_alb_init_slave() must be called before all other stages since
		 * it might fail and we do not want to have to undo everything
		 */
		res = bond_alb_init_slave(bond, new_slave);
		if (res)
			goto err_close;
	}

	res = vlan_vids_add_by_dev(slave_dev, bond_dev);
	if (res) {
		netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
			   slave_dev->name);
		goto err_close;
	}

	prev_slave = bond_last_slave(bond);

	new_slave->delay = 0;
	new_slave->link_failure_count = 0;

1579 1580
	if (bond_update_speed_duplex(new_slave) &&
	    bond_needs_speed_duplex(bond))
1581
		new_slave->link = BOND_LINK_DOWN;
1582

1583
	new_slave->last_rx = jiffies -
1584
		(msecs_to_jiffies(bond->params.arp_interval) + 1);
1585
	for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
1586
		new_slave->target_last_arp_rx[i] = new_slave->last_rx;
1587

	if (bond->params.miimon && !bond->params.use_carrier) {
		link_reporting = bond_check_dev_link(bond, slave_dev, 1);

		if ((link_reporting == -1) && !bond->params.arp_interval) {
1592
			/* miimon is set but a bonded network driver
			 * does not support ETHTOOL/MII and
			 * arp_interval is not set.  Note: if
			 * use_carrier is enabled, we will never go
			 * here (because netif_carrier is always
			 * supported); thus, we don't need to change
			 * the messages for netif_carrier.
			 */
1600 1601
			netdev_warn(bond_dev, "MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n",
				    slave_dev->name);
		} else if (link_reporting == -1) {
			/* unable to get link status using mii/ethtool */
			netdev_warn(bond_dev, "can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n",
				    slave_dev->name);
		}
	}

	/* check for initial state */
1610
	new_slave->link = BOND_LINK_NOCHANGE;
1611 1612 1613
	if (bond->params.miimon) {
		if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
			if (bond->params.updelay) {
1614
				bond_set_slave_link_state(new_slave,
1615 1616
							  BOND_LINK_BACK,
							  BOND_SLAVE_NOTIFY_NOW);
1617 1618
				new_slave->delay = bond->params.updelay;
			} else {
1619
				bond_set_slave_link_state(new_slave,
1620 1621
							  BOND_LINK_UP,
							  BOND_SLAVE_NOTIFY_NOW);
1622
			}
		} else {
1624 1625
			bond_set_slave_link_state(new_slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_NOW);
		}
1627
	} else if (bond->params.arp_interval) {
1628 1629
		bond_set_slave_link_state(new_slave,
					  (netif_carrier_ok(slave_dev) ?
1630 1631
					  BOND_LINK_UP : BOND_LINK_DOWN),
					  BOND_SLAVE_NOTIFY_NOW);
	} else {
1633 1634
		bond_set_slave_link_state(new_slave, BOND_LINK_UP,
					  BOND_SLAVE_NOTIFY_NOW);
	}

1637
	if (new_slave->link != BOND_LINK_DOWN)
1638
		new_slave->last_link_up = jiffies;
1639 1640 1641
	netdev_dbg(bond_dev, "Initial state of slave_dev is BOND_LINK_%s\n",
		   new_slave->link == BOND_LINK_DOWN ? "DOWN" :
		   (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
1642

1643
	if (bond_uses_primary(bond) && bond->params.primary[0]) {
		/* if there is a primary slave, remember it */
1645
		if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
1646
			rcu_assign_pointer(bond->primary_slave, new_slave);
1647 1648
			bond->force_primary = true;
		}
	}

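	/* Mode-specific setup of the new slave: each case below decides
	 * whether the slave starts out active or as an inactive backup and
	 * performs any extra per-mode initialisation (e.g. 802.3ad
	 * aggregator id assignment).
	 */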
	switch (BOND_MODE(bond)) {
	case BOND_MODE_ACTIVEBACKUP:
1653 1654
		bond_set_slave_inactive_flags(new_slave,
					      BOND_SLAVE_NOTIFY_NOW);
		break;
	case BOND_MODE_8023AD:
		/* in 802.3ad mode, the internal mechanism
		 * will activate the slaves in the selected
		 * aggregator
		 */
1661
		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
		/* if this is the first slave */
1663
		if (!prev_slave) {
1664
			SLAVE_AD_INFO(new_slave)->id = 1;
			/* Initialize AD with the number of times that the AD timer
			 * is called in 1 second; can be called only after the mac
			 * address of the bond is set
			 */
			bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
		} else {
1670 1671
			SLAVE_AD_INFO(new_slave)->id =
				SLAVE_AD_INFO(prev_slave)->id + 1;
		}

		bond_3ad_bind_slave(new_slave);
		break;
	case BOND_MODE_TLB:
	case BOND_MODE_ALB:
		bond_set_active_slave(new_slave);
1679
		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
		break;
	default:
1682
		netdev_dbg(bond_dev, "This slave is always active in trunk mode\n");

		/* always active in trunk mode */
		bond_set_active_slave(new_slave);

		/* In trunking mode there is little meaning to curr_active_slave
		 * anyway (it holds no special properties of the bond device),
		 * so we can change it without calling change_active_interface()
		 */
1691 1692
		if (!rcu_access_pointer(bond->curr_active_slave) &&
		    new_slave->link == BOND_LINK_UP)
1693
			rcu_assign_pointer(bond->curr_active_slave, new_slave);

		break;
	} /* switch(bond_mode) */

1698
#ifdef CONFIG_NET_POLL_CONTROLLER
1699
	if (bond->dev->npinfo) {
1700
		if (slave_enable_netpoll(new_slave)) {
1701
			netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
1702
			res = -EBUSY;
1703
			goto err_detach;
1704
		}
1705 1706
	}
#endif
1707

1708 1709 1710
	if (!(bond_dev->features & NETIF_F_LRO))
		dev_disable_lro(slave_dev);

	res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
					 new_slave);
	if (res) {
1714
		netdev_dbg(bond_dev, "Error %d calling netdev_rx_handler_register\n", res);
1715
		goto err_detach;
	}

1718
	res = bond_master_upper_dev_link(bond, new_slave, extack);
1719
	if (res) {
1720
		netdev_dbg(bond_dev, "Error %d calling bond_master_upper_dev_link\n", res);
1721 1722 1723
		goto err_unregister;
	}

1724 1725
	res = bond_sysfs_slave_add(new_slave);
	if (res) {
1726
		netdev_dbg(bond_dev, "Error %d calling bond_sysfs_slave_add\n", res);
1727 1728 1729
		goto err_upper_unlink;
	}

1730 1731
	bond->nest_level = dev_get_nest_level(bond_dev) + 1;

1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745
	/* If the mode uses primary, then the following is handled by
	 * bond_change_active_slave().
	 */
	if (!bond_uses_primary(bond)) {
		/* set promiscuity level to new slave */
		if (bond_dev->flags & IFF_PROMISC) {
			res = dev_set_promiscuity(slave_dev, 1);
			if (res)
				goto err_sysfs_del;
		}

		/* set allmulti level to new slave */
		if (bond_dev->flags & IFF_ALLMULTI) {
			res = dev_set_allmulti(slave_dev, 1);
1746 1747 1748
			if (res) {
				if (bond_dev->flags & IFF_PROMISC)
					dev_set_promiscuity(slave_dev, -1);
1749
				goto err_sysfs_del;
1750
			}
1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765
		}

		netif_addr_lock_bh(bond_dev);
		dev_mc_sync_multiple(slave_dev, bond_dev);
		dev_uc_sync_multiple(slave_dev, bond_dev);
		netif_addr_unlock_bh(bond_dev);

		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			/* add lacpdu mc addr to mc list */
			u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;

			dev_mc_add(slave_dev, lacpdu_multicast);
		}
	}

1766 1767 1768 1769
	bond->slave_cnt++;
	bond_compute_features(bond);
	bond_set_carrier(bond);

1770
	if (bond_uses_primary(bond)) {
1771
		block_netpoll_tx();
1772
		bond_select_active_slave(bond);
1773
		unblock_netpoll_tx();
1774
	}
1775

1776
	if (bond_mode_can_use_xmit_hash(bond))
1777 1778
		bond_update_slave_arr(bond, NULL);

1779

1780 1781 1782 1783
	netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
		    slave_dev->name,
		    bond_is_active_slave(new_slave) ? "an active" : "a backup",
		    new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");

	/* enslave is successful */
1786
	bond_queue_slave_event(new_slave);
	return 0;

/* Undo stages on error */
1790 1791 1792
err_sysfs_del:
	bond_sysfs_slave_del(new_slave);

1793
err_upper_unlink:
1794
	bond_upper_dev_unlink(bond, new_slave);
1795

1796 1797 1798
err_unregister:
	netdev_rx_handler_unregister(slave_dev);

1799
err_detach:
1800
	vlan_vids_del_by_dev(slave_dev, bond_dev);
1801 1802
	if (rcu_access_pointer(bond->primary_slave) == new_slave)
		RCU_INIT_POINTER(bond->primary_slave, NULL);
1803
	if (rcu_access_pointer(bond->curr_active_slave) == new_slave) {
1804
		block_netpoll_tx();
1805
		bond_change_active_slave(bond, NULL);
1806
		bond_select_active_slave(bond);
1807
		unblock_netpoll_tx();
1808
	}
1809 1810
	/* either primary_slave or curr_active_slave might've changed */
	synchronize_rcu();
1811
	slave_disable_netpoll(new_slave);
1812

err_close:
1814
	slave_dev->priv_flags &= ~IFF_BONDING;
	dev_close(slave_dev);

err_restore_mac:
1818
	slave_dev->flags &= ~IFF_SLAVE;
1819
	if (!bond->params.fail_over_mac ||
1820
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1821 1822 1823 1824
		/* XXX TODO - fail_over_mac "follow" mode needs to change master's
		 * MAC if this slave's MAC is in use by the bond, or at
		 * least print a warning.
		 */
1825 1826 1827 1828
		bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
				  new_slave->dev->addr_len);
		ss.ss_family = slave_dev->type;
		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
1829
	}

1831 1832 1833
err_restore_mtu:
	dev_set_mtu(slave_dev, new_slave->original_mtu);

err_free:
1835
	bond_free_slave(new_slave);

err_undo_flags:
1838
	/* Enslave of first slave has failed and we need to fix master's mac */
1839 1840 1841 1842 1843
	if (!bond_has_slaves(bond)) {
		if (ether_addr_equal_64bits(bond_dev->dev_addr,
					    slave_dev->dev_addr))
			eth_hw_addr_random(bond_dev);
		if (bond_dev->type != ARPHRD_ETHER) {
1844
			dev_close(bond_dev);
1845 1846 1847 1848 1849
			ether_setup(bond_dev);
			bond_dev->flags |= IFF_MASTER;
			bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		}
	}

	return res;
}

/* Try to release the slave device <slave> from the bond device <master>.
 * It is legal to access curr_active_slave without a lock because the entire
 * function is RTNL-locked. If "all" is true it means that the function is
 * being called while destroying a bond interface and all slaves are being
 * released.
 *
 * The rules for slave state should be:
 *   for Active/Backup:
 *     Active stays on, all backups go down
 *   for Bonded connections:
 *     The first up interface should be left on and all others downed.
 */
1865 1866
static int __bond_release_one(struct net_device *bond_dev,
			      struct net_device *slave_dev,
1867
			      bool all, bool unregister)
{
1869
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *oldcurrent;
1871
	struct sockaddr_storage ss;
1872
	int old_flags = bond_dev->flags;
1873
	netdev_features_t old_features = bond_dev->features;

	/* slave is not a slave or master is not master of this slave */
	if (!(slave_dev->flags & IFF_SLAVE) ||
1877
	    !netdev_has_upper_dev(slave_dev, bond_dev)) {
1878
		netdev_dbg(bond_dev, "cannot release %s\n",
1879
			   slave_dev->name);
		return -EINVAL;
	}

1883
	block_netpoll_tx();

	slave = bond_get_slave_by_dev(bond, slave_dev);
	if (!slave) {
		/* not a slave of this bond */
1888 1889
		netdev_info(bond_dev, "%s not enslaved\n",
			    slave_dev->name);
1890
		unblock_netpoll_tx();
		return -EINVAL;
	}

1894 1895
	bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);

1896 1897
	bond_sysfs_slave_del(slave);

1898 1899 1900
	/* recompute stats just before removing the slave */
	bond_get_stats(bond->dev, &bond->bond_stats);

1901
	bond_upper_dev_unlink(bond, slave);
	/* unregister rx_handler early so bond_handle_frame wouldn't be called
	 * for this slave anymore.
	 */
	netdev_rx_handler_unregister(slave_dev);

1907
	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		bond_3ad_unbind_slave(slave);

1910
	if (bond_mode_can_use_xmit_hash(bond))
1911 1912
		bond_update_slave_arr(bond, slave);

1913 1914 1915
	netdev_info(bond_dev, "Releasing %s interface %s\n",
		    bond_is_active_slave(slave) ? "active" : "backup",
		    slave_dev->name);

1917
	oldcurrent = rcu_access_pointer(bond->curr_active_slave);

1919
	RCU_INIT_POINTER(bond->current_arp_slave, NULL);

1921
	if (!all && (!bond->params.fail_over_mac ||
1922
		     BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
1923
		if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
1924
		    bond_has_slaves(bond))
1925 1926 1927
			netdev_warn(bond_dev, "the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
				    slave_dev->name, slave->perm_hwaddr,
				    bond_dev->name, slave_dev->name);
1928 1929
	}

1930 1931
	if (rtnl_dereference(bond->primary_slave) == slave)
		RCU_INIT_POINTER(bond->primary_slave, NULL);

1933
	if (oldcurrent == slave)
		bond_change_active_slave(bond, NULL);

1936
	if (bond_is_lb(bond)) {
		/* Must be called only after the slave has been
		 * detached from the list and the curr_active_slave
		 * has been cleared (if our_slave == old_current),
		 * but before a new active slave is selected.
		 */
		bond_alb_deinit_slave(bond, slave);
	}

1945
	if (all) {
1946
		RCU_INIT_POINTER(bond->curr_active_slave, NULL);
1947
	} else if (oldcurrent == slave) {
1948
		/* Note that we hold RTNL over this sequence, so there
1949 1950 1951
		 * is no concern that another slave add/remove event
		 * will interfere.
		 */
		bond_select_active_slave(bond);
1953 1954
	}

1955
	if (!bond_has_slaves(bond)) {
1956
		bond_set_carrier(bond);
1957
		eth_hw_addr_random(bond_dev);
	}

1960
	unblock_netpoll_tx();
1961
	synchronize_rcu();
1962
	bond->slave_cnt--;

1964
	if (!bond_has_slaves(bond)) {
1965
		call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
1966 1967
		call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
	}
1968

1969 1970 1971
	bond_compute_features(bond);
	if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
	    (old_features & NETIF_F_VLAN_CHALLENGED))
1972 1973
		netdev_info(bond_dev, "last VLAN challenged slave %s left bond %s - VLAN blocking is removed\n",
			    slave_dev->name, bond_dev->name);
1974

1975
	vlan_vids_del_by_dev(slave_dev, bond_dev);

1977
	/* If the mode uses primary, then this case was handled above by
1978
	 * bond_change_active_slave(..., NULL)
	 */
1980
	if (!bond_uses_primary(bond)) {
1981 1982 1983 1984 1985 1986 1987 1988
		/* unset promiscuity level from slave
		 * NOTE: The NETDEV_CHANGEADDR call above may change the value
		 * of the IFF_PROMISC flag in the bond_dev, but we need the
		 * value of that flag before that change, as that was the value
		 * when this slave was attached, so we cache at the start of the
		 * function and use it here. Same goes for ALLMULTI below
		 */
		if (old_flags & IFF_PROMISC)
			dev_set_promiscuity(slave_dev, -1);

		/* unset allmulti level from slave */
1992
		if (old_flags & IFF_ALLMULTI)
			dev_set_allmulti(slave_dev, -1);

1995
		bond_hw_addr_flush(bond_dev, slave_dev);
	}

1998
	slave_disable_netpoll(slave);
1999

	/* close slave before restoring its mac address */
	dev_close(slave_dev);

2003
	if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
2004
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2005
		/* restore original ("permanent") mac address */
2006 2007 2008 2009
		bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
				  slave->dev->addr_len);
		ss.ss_family = slave_dev->type;
		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
2010
	}

2012 2013 2014 2015
	if (unregister)
		__dev_set_mtu(slave_dev, slave->original_mtu);
	else
		dev_set_mtu(slave_dev, slave->original_mtu);
2016

2017
	slave_dev->priv_flags &= ~IFF_BONDING;

2019
	bond_free_slave(slave);

2021
	return 0;
}

2024 2025 2026
/* A wrapper used because of ndo_del_link */
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
{
2027
	return __bond_release_one(bond_dev, slave_dev, false, false);
2028 2029
}

2030 2031 2032
/* First release a slave and then destroy the bond if no more slaves are left.
 * Must be under rtnl_lock when this function is called.
 */
2033 2034
static int  bond_release_and_destroy(struct net_device *bond_dev,
				     struct net_device *slave_dev)
2035
{
2036
	struct bonding *bond = netdev_priv(bond_dev);
2037 2038
	int ret;

2039
	ret = __bond_release_one(bond_dev, slave_dev, false, true);
2040
	if (ret == 0 && !bond_has_slaves(bond)) {
2041
		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
2042 2043
		netdev_info(bond_dev, "Destroying bond %s\n",
			    bond_dev->name);
2044
		bond_remove_proc_entry(bond);
		unregister_netdevice(bond_dev);
2046 2047 2048 2049
	}
	return ret;
}

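/* Fill @info with this bond's current settings for the bond info-query
 * ioctl path.
 */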
static void bond_info_query(struct net_device *bond_dev, struct ifbond *info)
{
2052
	struct bonding *bond = netdev_priv(bond_dev);
2053
	bond_fill_ifbond(bond, info);
}

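/* Fill @info for the slave at index info->slave_id; returns -ENODEV when
 * no slave with that index exists.
 */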
static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
{
2058
	struct bonding *bond = netdev_priv(bond_dev);
2059
	struct list_head *iter;
2060
	int i = 0, res = -ENODEV;
	struct slave *slave;

2063
	bond_for_each_slave(bond, slave, iter) {
2064
		if (i++ == (int)info->slave_id) {
2065
			res = 0;
2066
			bond_fill_ifslave(slave, info);
			break;
		}
	}

2071
	return res;
}

/*-------------------------------- Monitoring -------------------------------*/

2076
/* called with rcu_read_lock() */
static int bond_miimon_inspect(struct bonding *bond)
{
2079
	int link_state, commit = 0;
2080
	struct list_head *iter;
	struct slave *slave;
2082 2083
	bool ignore_updelay;

2084
	ignore_updelay = !rcu_dereference(bond->curr_active_slave);

2086
	bond_for_each_slave_rcu(bond, slave, iter) {
		slave->new_link = BOND_LINK_NOCHANGE;
2088
		slave->link_new_state = slave->link;

		link_state = bond_check_dev_link(bond, slave->dev, 0);

		switch (slave->link) {
		case BOND_LINK_UP:
			if (link_state)
				continue;

2097
			bond_propose_link_state(slave, BOND_LINK_FAIL);
2098
			commit++;
			slave->delay = bond->params.downdelay;
			if (slave->delay) {
2101 2102 2103 2104 2105 2106 2107
				netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
					    (BOND_MODE(bond) ==
					     BOND_MODE_ACTIVEBACKUP) ?
					     (bond_is_active_slave(slave) ?
					      "active " : "backup ") : "",
					    slave->dev->name,
					    bond->params.downdelay * bond->params.miimon);
			}
			/*FALLTHRU*/
		case BOND_LINK_FAIL:
			if (link_state) {
2112
				/* recovered before downdelay expired */
2113
				bond_propose_link_state(slave, BOND_LINK_UP);
2114
				slave->last_link_up = jiffies;
2115 2116 2117 2118
				netdev_info(bond->dev, "link status up again after %d ms for interface %s\n",
					    (bond->params.downdelay - slave->delay) *
					    bond->params.miimon,
					    slave->dev->name);
2119
				commit++;
				continue;
			}

			if (slave->delay <= 0) {
				slave->new_link = BOND_LINK_DOWN;
				commit++;
				continue;
			}

			slave->delay--;
			break;

		case BOND_LINK_DOWN:
			if (!link_state)
				continue;

2136
			bond_propose_link_state(slave, BOND_LINK_BACK);
2137
			commit++;
			slave->delay = bond->params.updelay;

			if (slave->delay) {
2141 2142 2143 2144 2145
				netdev_info(bond->dev, "link status up for interface %s, enabling it in %d ms\n",
					    slave->dev->name,
					    ignore_updelay ? 0 :
					    bond->params.updelay *
					    bond->params.miimon);
			}
			/*FALLTHRU*/
		case BOND_LINK_BACK:
			if (!link_state) {
2150
				bond_propose_link_state(slave, BOND_LINK_DOWN);
2151 2152 2153 2154
				netdev_info(bond->dev, "link status down again after %d ms for interface %s\n",
					    (bond->params.updelay - slave->delay) *
					    bond->params.miimon,
					    slave->dev->name);
2155
				commit++;
				continue;
			}

2159 2160 2161
			if (ignore_updelay)
				slave->delay = 0;

			if (slave->delay <= 0) {
				slave->new_link = BOND_LINK_UP;
				commit++;
2165
				ignore_updelay = false;
				continue;
			}

			slave->delay--;
			break;
		}
	}

	return commit;
}

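/* Propagate a miimon-detected link change to the mode-specific machinery
 * (802.3ad, TLB/ALB, or the XOR transmit slave array).
 */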
static void bond_miimon_link_change(struct bonding *bond,
				    struct slave *slave,
				    char link)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_8023AD:
		bond_3ad_handle_link_change(slave, link);
		break;
	case BOND_MODE_TLB:
	case BOND_MODE_ALB:
		bond_alb_handle_link_change(bond, slave, link);
		break;
	case BOND_MODE_XOR:
		bond_update_slave_arr(bond, NULL);
		break;
	}
}

static void bond_miimon_commit(struct bonding *bond)
{
2197
	struct list_head *iter;
2198
	struct slave *slave, *primary;

2200
	bond_for_each_slave(bond, slave, iter) {
		switch (slave->new_link) {
		case BOND_LINK_NOCHANGE:
			continue;

		case BOND_LINK_UP:
2206 2207
			if (bond_update_speed_duplex(slave) &&
			    bond_needs_speed_duplex(bond)) {
2208
				slave->link = BOND_LINK_DOWN;
2209 2210 2211 2212
				if (net_ratelimit())
					netdev_warn(bond->dev,
						    "failed to get link speed/duplex for %s\n",
						    slave->dev->name);
2213 2214
				continue;
			}
2215 2216
			bond_set_slave_link_state(slave, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);
2217
			slave->last_link_up = jiffies;

2219
			primary = rtnl_dereference(bond->primary_slave);
2220
			if (BOND_MODE(bond) == BOND_MODE_8023AD) {
				/* prevent it from being the active one */
				bond_set_backup_slave(slave);
2223
			} else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
				/* make it immediately active */
				bond_set_active_slave(slave);
2226
			} else if (slave != primary) {
				/* prevent it from being the active one */
				bond_set_backup_slave(slave);
			}

2231 2232 2233 2234
			netdev_info(bond->dev, "link status definitely up for interface %s, %u Mbps %s duplex\n",
				    slave->dev->name,
				    slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
				    slave->duplex ? "full" : "half");

2236
			bond_miimon_link_change(bond, slave, BOND_LINK_UP);
2237

2238
			if (!bond->curr_active_slave || slave == primary)
				goto do_failover;

			continue;
2242

		case BOND_LINK_DOWN:
			if (slave->link_failure_count < UINT_MAX)
				slave->link_failure_count++;

2247 2248
			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_NOW);

2250 2251
			if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
			    BOND_MODE(bond) == BOND_MODE_8023AD)
2252 2253
				bond_set_slave_inactive_flags(slave,
							      BOND_SLAVE_NOTIFY_NOW);

2255 2256
			netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n",
				    slave->dev->name);

2258
			bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);
2259

2260
			if (slave == rcu_access_pointer(bond->curr_active_slave))
				goto do_failover;

			continue;

		default:
2266 2267
			netdev_err(bond->dev, "invalid new link %d on slave %s\n",
				   slave->new_link, slave->dev->name);
			slave->new_link = BOND_LINK_NOCHANGE;

			continue;
		}

do_failover:
2274
		block_netpoll_tx();
		bond_select_active_slave(bond);
2276
		unblock_netpoll_tx();
	}

	bond_set_carrier(bond);
}

2282
/* bond_mii_monitor
2283 2284
 *
 * Really a wrapper that splits the mii monitor into two phases: an
 * inspection, then (if inspection indicates something needs to be done)
 * an acquisition of appropriate locks followed by a commit phase to
 * implement whatever link state changes are indicated.
2288
 */
2289
static void bond_mii_monitor(struct work_struct *work)
2290 2291 2292
{
	struct bonding *bond = container_of(work, struct bonding,
					    mii_work.work);
2293
	bool should_notify_peers = false;
2294
	unsigned long delay;
2295 2296
	struct slave *slave;
	struct list_head *iter;
2297

2298 2299 2300
	delay = msecs_to_jiffies(bond->params.miimon);

	if (!bond_has_slaves(bond))
		goto re_arm;
2302

2303 2304
	rcu_read_lock();

2305 2306
	should_notify_peers = bond_should_notify_peers(bond);

2307
	if (bond_miimon_inspect(bond)) {
2308
		rcu_read_unlock();

2310 2311 2312 2313 2314 2315
		/* Race avoidance with bond_close cancel of workqueue */
		if (!rtnl_trylock()) {
			delay = 1;
			should_notify_peers = false;
			goto re_arm;
		}
2316

2317 2318 2319
		bond_for_each_slave(bond, slave, iter) {
			bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
		}
2320 2321 2322
		bond_miimon_commit(bond);

		rtnl_unlock();	/* might sleep, hold no other locks */
2323 2324
	} else
		rcu_read_unlock();
2325

re_arm:
2327
	if (bond->params.miimon)
2328 2329 2330 2331 2332 2333 2334 2335
		queue_delayed_work(bond->wq, &bond->mii_work, delay);

	if (should_notify_peers) {
		if (!rtnl_trylock())
			return;
		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
		rtnl_unlock();
	}
2336
}

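/* Helper for bond_has_this_ip(): returns nonzero when @upper owns the IP
 * address passed in via @data.
 */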
static int bond_upper_dev_walk(struct net_device *upper, void *data)
{
	__be32 ip = *((__be32 *)data);

	return ip == bond_confirm_addr(upper, 0, ip);
}

2345
static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
2346
{
2347
	bool ret = false;
2348

2349
	if (ip == bond_confirm_addr(bond->dev, 0, ip))
2350
		return true;
2351

2352
	rcu_read_lock();
2353 2354
	if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk, &ip))
		ret = true;
2355
	rcu_read_unlock();
2356

2357
	return ret;
2358 2359
}

2360
/* We go to the (large) trouble of VLAN tagging ARP frames because
 * switches in VLAN mode (especially if ports are configured as
 * "native" to a VLAN) might not pass non-tagged frames.
 */
2364 2365
static void bond_arp_send(struct net_device *slave_dev, int arp_op,
			  __be32 dest_ip, __be32 src_ip,
2366
			  struct bond_vlan_tag *tags)
{
	struct sk_buff *skb;
2369
	struct bond_vlan_tag *outer_tag = tags;

2371 2372
	netdev_dbg(slave_dev, "arp %d on slave %s: dst %pI4 src %pI4\n",
		   arp_op, slave_dev->name, &dest_ip, &src_ip);

	skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
			 NULL, slave_dev->dev_addr, NULL);

	if (!skb) {
2378
		net_err_ratelimited("ARP packet allocation failed\n");
		return;
	}
2381

2382 2383 2384 2385 2386
	if (!tags || tags->vlan_proto == VLAN_N_VID)
		goto xmit;

	tags++;

2387
	/* Go through all the tags backwards and add them to the packet */
2388 2389 2390
	while (tags->vlan_proto != VLAN_N_VID) {
		if (!tags->vlan_id) {
			tags++;
2391
			continue;
2392
		}
2393

2394
		netdev_dbg(slave_dev, "inner tag: proto %X vid %X\n",
2395
			   ntohs(outer_tag->vlan_proto), tags->vlan_id);
2396 2397
		skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
						tags->vlan_id);
2398 2399 2400 2401
		if (!skb) {
			net_err_ratelimited("failed to insert inner VLAN tag\n");
			return;
		}
2402 2403

		tags++;
2404 2405
	}
	/* Set the outer tag */
2406
	if (outer_tag->vlan_id) {
2407
		netdev_dbg(slave_dev, "outer tag: proto %X vid %X\n",
2408
			   ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
		__vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
				       outer_tag->vlan_id);
	}
2412 2413

xmit:
	arp_xmit(skb);
}

2417 2418 2419 2420 2421 2422
/* Validate the device path between the @start_dev and the @end_dev.
 * The path is valid if the @end_dev is reachable through device
 * stacking.
 * When the path is validated, collect any vlan information in the
 * path.
 */
2423 2424 2425
struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
					      struct net_device *end_dev,
					      int level)
2426
{
2427
	struct bond_vlan_tag *tags;
2428 2429 2430
	struct net_device *upper;
	struct list_head  *iter;

2431
	if (start_dev == end_dev) {
		tags = kcalloc(level + 1, sizeof(*tags), GFP_ATOMIC);
2433 2434 2435 2436 2437
		if (!tags)
			return ERR_PTR(-ENOMEM);
		tags[level].vlan_proto = VLAN_N_VID;
		return tags;
	}
2438 2439

	netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
2440 2441 2442 2443 2444
		tags = bond_verify_device_path(upper, end_dev, level + 1);
		if (IS_ERR_OR_NULL(tags)) {
			if (IS_ERR(tags))
				return tags;
			continue;
2445
		}
2446 2447 2448 2449 2450 2451
		if (is_vlan_dev(upper)) {
			tags[level].vlan_proto = vlan_dev_vlan_proto(upper);
			tags[level].vlan_id = vlan_dev_vlan_id(upper);
		}

		return tags;
2452 2453
	}

2454
	return NULL;
2455
}

static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
{
	struct rtable *rt;
2460
	struct bond_vlan_tag *tags;
2461
	__be32 *targets = bond->params.arp_targets, addr;
2462
	int i;

2464
	for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
2465
		netdev_dbg(bond->dev, "basa: target %pI4\n", &targets[i]);
2466
		tags = NULL;

		/* Find out through which dev the packet should go */
		rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
				     RTO_ONLINK, 0);
2471
		if (IS_ERR(rt)) {
2472 2473 2474
			/* there's no route to target - try to send arp
			 * probe to generate any traffic (arp_validate=0)
			 */
2475 2476 2477 2478
			if (bond->params.arp_validate)
				net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
						     bond->dev->name,
						     &targets[i]);
2479 2480
			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
				      0, tags);
			continue;
		}

2484 2485 2486 2487 2488
		/* bond device itself */
		if (rt->dst.dev == bond->dev)
			goto found;

		rcu_read_lock();
2489
		tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0);
2490
		rcu_read_unlock();

2492
		if (!IS_ERR_OR_NULL(tags))
2493 2494
			goto found;

2495
		/* Not our device - skip */
2496 2497
		netdev_dbg(bond->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
			   &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");
2498

2499
		ip_rt_put(rt);
2500 2501 2502 2503 2504 2505
		continue;

found:
		addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
		ip_rt_put(rt);
		bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
2506
			      addr, tags);
2507
		kfree(tags);
	}
}

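/* Record a validated ARP on @slave: when @sip is one of the configured
 * arp_ip_targets and @tip belongs to this bond, refresh the slave's
 * last_rx and per-target receive timestamps.
 */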
static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
2512
{
2513 2514
	int i;

2515
	if (!sip || !bond_has_this_ip(bond, tip)) {
2516 2517
		netdev_dbg(bond->dev, "bva: sip %pI4 tip %pI4 not found\n",
			   &sip, &tip);
2518 2519
		return;
	}
2520

2521 2522
	i = bond_get_targets_ip(bond->params.arp_targets, sip);
	if (i == -1) {
2523 2524
		netdev_dbg(bond->dev, "bva: sip %pI4 not found in targets\n",
			   &sip);
2525
		return;
2526
	}
2527
	slave->last_rx = jiffies;
2528
	slave->target_last_arp_rx[i] = jiffies;
2529 2530
}

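/* Receive-path hook for the ARP monitor: validate an incoming ARP on
 * @slave and refresh its last-receive timestamps.  Always returns
 * RX_HANDLER_ANOTHER so the frame continues through normal processing.
 */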
int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
		 struct slave *slave)
2533
{
2534
	struct arphdr *arp = (struct arphdr *)skb->data;
2535
	struct slave *curr_active_slave, *curr_arp_slave;
2536
	unsigned char *arp_ptr;
2537
	__be32 sip, tip;
2538 2539
	int is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
	unsigned int alen;
2540

2541
	if (!slave_do_arp_validate(bond, slave)) {
2542 2543
		if ((slave_do_arp_validate_only(bond) && is_arp) ||
		    !slave_do_arp_validate_only(bond))
2544
			slave->last_rx = jiffies;
2545
		return RX_HANDLER_ANOTHER;
2546 2547 2548
	} else if (!is_arp) {
		return RX_HANDLER_ANOTHER;
	}
2549

2550
	alen = arp_hdr_len(bond->dev);
2551

2552 2553
	netdev_dbg(bond->dev, "bond_arp_rcv: skb->dev %s\n",
		   skb->dev->name);
2554

2555 2556 2557 2558 2559 2560 2561
	if (alen > skb_headlen(skb)) {
		arp = kmalloc(alen, GFP_ATOMIC);
		if (!arp)
			goto out_unlock;
		if (skb_copy_bits(skb, 0, arp, alen) < 0)
			goto out_unlock;
	}
2562

2563
	if (arp->ar_hln != bond->dev->addr_len ||
2564 2565 2566 2567 2568 2569 2570 2571
	    skb->pkt_type == PACKET_OTHERHOST ||
	    skb->pkt_type == PACKET_LOOPBACK ||
	    arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_pln != 4)
		goto out_unlock;

	arp_ptr = (unsigned char *)(arp + 1);
2572
	arp_ptr += bond->dev->addr_len;
2573
	memcpy(&sip, arp_ptr, 4);
2574
	arp_ptr += 4 + bond->dev->addr_len;
2575 2576
	memcpy(&tip, arp_ptr, 4);

2577 2578 2579 2580
	netdev_dbg(bond->dev, "bond_arp_rcv: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
		   slave->dev->name, bond_slave_state(slave),
		   bond->params.arp_validate, slave_do_arp_validate(bond, slave),
		   &sip, &tip);
2581

2582
	curr_active_slave = rcu_dereference(bond->curr_active_slave);
2583
	curr_arp_slave = rcu_dereference(bond->current_arp_slave);
2584

2585
	/* We 'trust' the received ARP enough to validate it if:
2586
	 *
2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605
	 * (a) the slave receiving the ARP is active (which includes the
	 * current ARP slave, if any), or
	 *
	 * (b) the receiving slave isn't active, but there is a currently
	 * active slave and it received valid arp reply(s) after it became
	 * the currently active slave, or
	 *
	 * (c) there is an ARP slave that sent an ARP during the prior ARP
	 * interval, and we receive an ARP reply on any slave.  We accept
	 * these because switch FDB update delays may deliver the ARP
	 * reply to a slave other than the sender of the ARP request.
	 *
	 * Note: for (b), backup slaves are receiving the broadcast ARP
	 * request, not a reply.  This request passes from the sending
	 * slave through the L2 switch(es) to the receiving slave.  Since
	 * this is checking the request, sip/tip are swapped for
	 * validation.
	 *
	 * This is done to avoid endless looping when we can't reach the
2606
	 * arp_ip_target and fool ourselves with our own arp requests.
2607
	 */
	if (bond_is_active_slave(slave))
2609
		bond_validate_arp(bond, slave, sip, tip);
2610 2611 2612
	else if (curr_active_slave &&
		 time_after(slave_last_rx(bond, curr_active_slave),
			    curr_active_slave->last_link_up))
2613
		bond_validate_arp(bond, slave, tip, sip);
2614 2615 2616 2617
	else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
		 bond_time_in_interval(bond,
				       dev_trans_start(curr_arp_slave->dev), 1))
		bond_validate_arp(bond, slave, sip, tip);
2618 2619

out_unlock:
2620 2621
	if (arp != (struct arphdr *)skb->data)
		kfree(arp);
2622
	return RX_HANDLER_ANOTHER;
2623 2624
}

2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638
/* function to verify if we're in the arp_interval timeslice, returns true if
 * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
 * arp_interval/2). The arp_interval/2 is needed for really fast networks.
 */
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
				  int mod)
{
	int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);

	return time_in_range(jiffies,
			     last_act - delta_in_ticks,
			     last_act + mod * delta_in_ticks + delta_in_ticks/2);
}

2639
/* This function is called regularly to monitor each slave's link
 * ensuring that traffic is being sent and received when arp monitoring
 * is used in load-balancing mode. if the adapter has been dormant, then an
 * arp is transmitted to generate traffic. see activebackup_arp_monitor for
 * arp monitoring in active backup mode.
 */
2645
static void bond_loadbalance_arp_mon(struct bonding *bond)
{
	struct slave *slave, *oldcurrent;
2648
	struct list_head *iter;
2649
	int do_failover = 0, slave_state_changed = 0;

2651
	if (!bond_has_slaves(bond))
		goto re_arm;

2654 2655
	rcu_read_lock();

2656
	oldcurrent = rcu_dereference(bond->curr_active_slave);
	/* see if any of the previous devices are up now (i.e. they have
	 * xmt and rcv traffic). the curr_active_slave does not come into
2659 2660 2661
	 * the picture unless it is null. also, slave->last_link_up is not
	 * needed here because we send an arp on each slave and give a slave
	 * as long as it needs to get the tx/rx within the delta.
	 * TODO: what about up/down delay in arp mode? it wasn't here before
	 *       so it can wait
	 */
2665
	bond_for_each_slave_rcu(bond, slave, iter) {
2666 2667
		unsigned long trans_start = dev_trans_start(slave->dev);

2668 2669
		slave->new_link = BOND_LINK_NOCHANGE;

		if (slave->link != BOND_LINK_UP) {
2671
			if (bond_time_in_interval(bond, trans_start, 1) &&
2672
			    bond_time_in_interval(bond, slave->last_rx, 1)) {

2674
				slave->new_link = BOND_LINK_UP;
2675
				slave_state_changed = 1;

				/* primary_slave has no meaning in round-robin
				 * mode. the window of a slave being up and
				 * curr_active_slave being null after enslaving
				 * is closed.
				 */
				if (!oldcurrent) {
2683 2684
					netdev_info(bond->dev, "link status definitely up for interface %s\n",
						    slave->dev->name);
					do_failover = 1;
				} else {
2687 2688
					netdev_info(bond->dev, "interface %s is now up\n",
						    slave->dev->name);
				}
			}
		} else {
			/* slave->link == BOND_LINK_UP */

			/* not all switches will respond to an arp request
			 * when the source ip is 0, so don't take the link down
			 * if we don't know our ip yet
			 */
2698
			if (!bond_time_in_interval(bond, trans_start, 2) ||
2699
			    !bond_time_in_interval(bond, slave->last_rx, 2)) {

2701
				slave->new_link = BOND_LINK_DOWN;
2702
				slave_state_changed = 1;

				if (slave->link_failure_count < UINT_MAX)
					slave->link_failure_count++;

2707 2708
				netdev_info(bond->dev, "interface %s is now down\n",
					    slave->dev->name);

				if (slave == oldcurrent)
					do_failover = 1;
			}
		}

		/* note: if switch is in round-robin mode, all links
		 * must tx arp to ensure all links rx an arp - otherwise
		 * links may oscillate or not come up at all; if switch is
		 * in something like xor mode, there is nothing we can
		 * do - all replies will be rx'ed on same link causing slaves
		 * to be unstable during low/no traffic periods
		 */
2722
		if (bond_slave_is_up(slave))
			bond_arp_send_all(bond, slave);
	}

2726 2727
	rcu_read_unlock();

2728
	if (do_failover || slave_state_changed) {
2729 2730
		if (!rtnl_trylock())
			goto re_arm;

2732 2733 2734 2735 2736
		bond_for_each_slave(bond, slave, iter) {
			if (slave->new_link != BOND_LINK_NOCHANGE)
				slave->link = slave->new_link;
		}

2737 2738
		if (slave_state_changed) {
			bond_slave_state_change(bond);
2739 2740
			if (BOND_MODE(bond) == BOND_MODE_XOR)
				bond_update_slave_arr(bond, NULL);
2741 2742
		}
		if (do_failover) {
2743 2744 2745 2746
			block_netpoll_tx();
			bond_select_active_slave(bond);
			unblock_netpoll_tx();
		}
2747
		rtnl_unlock();
	}

re_arm:
2751
	if (bond->params.arp_interval)
2752 2753
		queue_delayed_work(bond->wq, &bond->arp_work,
				   msecs_to_jiffies(bond->params.arp_interval));
}

2756
/* Called to inspect slaves for active-backup mode ARP monitor link state
2757 2758 2759 2760
 * changes.  Sets new_link in slaves to specify what action should take
 * place for the slave.  Returns 0 if no changes are found, >0 if changes
 * to link states must be committed.
 *
2761
 * Called with rcu_read_lock held.
 */
2763
static int bond_ab_arp_inspect(struct bonding *bond)
{
2765
	unsigned long trans_start, last_rx;
2766
	struct list_head *iter;
2767 2768
	struct slave *slave;
	int commit = 0;
2769

2770
	bond_for_each_slave_rcu(bond, slave, iter) {
2771
		slave->new_link = BOND_LINK_NOCHANGE;
2772
		last_rx = slave_last_rx(bond, slave);

2774
		if (slave->link != BOND_LINK_UP) {
2775
			if (bond_time_in_interval(bond, last_rx, 1)) {
2776 2777 2778 2779 2780
				slave->new_link = BOND_LINK_UP;
				commit++;
			}
			continue;
		}

2782
		/* Give slaves 2*delta after being enslaved or made
2783 2784 2785
		 * active.  This avoids bouncing, as the last receive
		 * times need a full ARP monitor cycle to be updated.
		 */
2786
		if (bond_time_in_interval(bond, slave->last_link_up, 2))
2787 2788
			continue;

2789
		/* Backup slave is down if:
2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800
		 * - No current_arp_slave AND
		 * - more than 3*delta since last receive AND
		 * - the bond has an IP address
		 *
		 * Note: a non-null current_arp_slave indicates
		 * the curr_active_slave went down and we are
		 * searching for a new one; under this condition
		 * we only take the curr_active_slave down - this
		 * gives each slave a chance to tx/rx traffic
		 * before being taken out
		 */
		if (!bond_is_active_slave(slave) &&
2802
		    !rcu_access_pointer(bond->current_arp_slave) &&
2803
		    !bond_time_in_interval(bond, last_rx, 3)) {
2804 2805 2806 2807
			slave->new_link = BOND_LINK_DOWN;
			commit++;
		}

2808
		/* Active slave is down if:
2809 2810 2811 2812
		 * - more than 2*delta since transmitting OR
		 * - (more than 2*delta since receive AND
		 *    the bond has an IP address)
		 */
2813
		trans_start = dev_trans_start(slave->dev);
		if (bond_is_active_slave(slave) &&
2815 2816
		    (!bond_time_in_interval(bond, trans_start, 2) ||
		     !bond_time_in_interval(bond, last_rx, 2))) {
2817 2818 2819
			slave->new_link = BOND_LINK_DOWN;
			commit++;
		}
	}

2822 2823
	return commit;
}

2825
/* Called to commit link state changes noted by inspection step of
2826 2827
 * active-backup mode ARP monitor.
 *
 * Called with RTNL held.
 */
2830
static void bond_ab_arp_commit(struct bonding *bond)
2831
{
2832
	unsigned long trans_start;
2833
	struct list_head *iter;
2834
	struct slave *slave;

2836
	bond_for_each_slave(bond, slave, iter) {
2837 2838 2839
		switch (slave->new_link) {
		case BOND_LINK_NOCHANGE:
			continue;
2840

2841
		case BOND_LINK_UP:
2842
			trans_start = dev_trans_start(slave->dev);
2843 2844
			if (rtnl_dereference(bond->curr_active_slave) != slave ||
			    (!rtnl_dereference(bond->curr_active_slave) &&
2845
			     bond_time_in_interval(bond, trans_start, 1))) {
2846 2847 2848
				struct slave *current_arp_slave;

				current_arp_slave = rtnl_dereference(bond->current_arp_slave);
2849 2850
				bond_set_slave_link_state(slave, BOND_LINK_UP,
							  BOND_SLAVE_NOTIFY_NOW);
2851
				if (current_arp_slave) {
2852
					bond_set_slave_inactive_flags(
2853
						current_arp_slave,
2854
						BOND_SLAVE_NOTIFY_NOW);
2855
					RCU_INIT_POINTER(bond->current_arp_slave, NULL);
2856
				}
2857

2858 2859
				netdev_info(bond->dev, "link status definitely up for interface %s\n",
					    slave->dev->name);
2860

2861
				if (!rtnl_dereference(bond->curr_active_slave) ||
2862
				    slave == rtnl_dereference(bond->primary_slave))
2863
					goto do_failover;

2865
			}

2867
			continue;

2869 2870 2871 2872
		case BOND_LINK_DOWN:
			if (slave->link_failure_count < UINT_MAX)
				slave->link_failure_count++;

2873 2874
			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_NOW);
2875 2876
			bond_set_slave_inactive_flags(slave,
						      BOND_SLAVE_NOTIFY_NOW);
2877

2878 2879
			netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n",
				    slave->dev->name);
2880

2881
			if (slave == rtnl_dereference(bond->curr_active_slave)) {
2882
				RCU_INIT_POINTER(bond->current_arp_slave, NULL);
2883
				goto do_failover;
			}
2885 2886

			continue;
2887 2888

		default:
2889 2890
			netdev_err(bond->dev, "impossible: new_link %d on slave %s\n",
				   slave->new_link, slave->dev->name);
2891
			continue;
		}

2894
do_failover:
2895
		block_netpoll_tx();
2896
		bond_select_active_slave(bond);
2897
		unblock_netpoll_tx();
2898
	}

2900 2901
	bond_set_carrier(bond);
}

2903
/* Send ARP probes for active-backup mode ARP monitor.
2904
 *
2905
 * Called with rcu_read_lock held.
2906
 */
2907
static bool bond_ab_arp_probe(struct bonding *bond)
2908
{
2909
	struct slave *slave, *before = NULL, *new_slave = NULL,
2910 2911
		     *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
		     *curr_active_slave = rcu_dereference(bond->curr_active_slave);
2912 2913
	struct list_head *iter;
	bool found = false;
2914
	bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
2915

2916
	if (curr_arp_slave && curr_active_slave)
2917 2918 2919
		netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n",
			    curr_arp_slave->dev->name,
			    curr_active_slave->dev->name);

2921 2922
	if (curr_active_slave) {
		bond_arp_send_all(bond, curr_active_slave);
2923
		return should_notify_rtnl;
2924
	}

2926 2927 2928 2929
	/* if we don't have a curr_active_slave, search for the next available
	 * backup slave from the current_arp_slave and make it the candidate
	 * for becoming the curr_active_slave
	 */

2931
	if (!curr_arp_slave) {
2932 2933 2934
		curr_arp_slave = bond_first_slave_rcu(bond);
		if (!curr_arp_slave)
			return should_notify_rtnl;
2935
	}

2937
	bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
2938

2939
	bond_for_each_slave_rcu(bond, slave, iter) {
2940
		if (!found && !before && bond_slave_is_up(slave))
2941
			before = slave;

2943
		if (found && !new_slave && bond_slave_is_up(slave))
2944
			new_slave = slave;
2945 2946 2947 2948 2949 2950
		/* if the link state is up at this point, we
		 * mark it down - this can happen if we have
		 * simultaneous link failures and
		 * reselect_active_interface doesn't make this
		 * one the current slave so it is still marked
		 * up when it is actually down
		 */
2952
		if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
2953 2954
			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_LATER);
2955 2956
			if (slave->link_failure_count < UINT_MAX)
				slave->link_failure_count++;

2958
			bond_set_slave_inactive_flags(slave,
2959
						      BOND_SLAVE_NOTIFY_LATER);
2960

2961 2962
			netdev_info(bond->dev, "backup interface %s is now down\n",
				    slave->dev->name);
		}
2964
		if (slave == curr_arp_slave)
2965
			found = true;
2966
	}
2967 2968 2969 2970

	if (!new_slave && before)
		new_slave = before;

2971 2972
	if (!new_slave)
		goto check_state;
2973

2974 2975
	bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
				  BOND_SLAVE_NOTIFY_LATER);
2976
	bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
2977
	bond_arp_send_all(bond, new_slave);
2978
	new_slave->last_link_up = jiffies;
2979
	rcu_assign_pointer(bond->current_arp_slave, new_slave);
2980

2981 2982
check_state:
	bond_for_each_slave_rcu(bond, slave, iter) {
2983
		if (slave->should_notify || slave->should_notify_link) {
2984 2985 2986 2987 2988
			should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
			break;
		}
	}
	return should_notify_rtnl;
2989
}

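/* Active-backup ARP monitor: inspect slaves under RCU, commit any link
 * state changes under RTNL, send probes, then re-arm the work item.
 */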
static void bond_activebackup_arp_mon(struct bonding *bond)
2992
{
2993 2994
	bool should_notify_peers = false;
	bool should_notify_rtnl = false;
2995
	int delta_in_ticks;

2997 2998 2999
	delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);

	if (!bond_has_slaves(bond))
3000 3001
		goto re_arm;

3002
	rcu_read_lock();
3003

3004 3005
	should_notify_peers = bond_should_notify_peers(bond);

3006 3007 3008
	if (bond_ab_arp_inspect(bond)) {
		rcu_read_unlock();

3009 3010 3011 3012 3013 3014
		/* Race avoidance with bond_close flush of workqueue */
		if (!rtnl_trylock()) {
			delta_in_ticks = 1;
			should_notify_peers = false;
			goto re_arm;
		}
3015

3016
		bond_ab_arp_commit(bond);
3017

3018
		rtnl_unlock();
3019
		rcu_read_lock();
3020 3021
	}

3022 3023
	should_notify_rtnl = bond_ab_arp_probe(bond);
	rcu_read_unlock();
3024

3025 3026
re_arm:
	if (bond->params.arp_interval)
3027 3028
		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);

3029
	if (should_notify_peers || should_notify_rtnl) {
3030 3031
		if (!rtnl_trylock())
			return;
3032 3033 3034 3035

		if (should_notify_peers)
			call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
						 bond->dev);
3036
		if (should_notify_rtnl) {
3037
			bond_slave_state_notify(bond);
3038 3039
			bond_slave_link_notify(bond);
		}
3040

3041 3042
		rtnl_unlock();
	}
}

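/* Work item entry point for the ARP monitor; dispatches to the
 * active-backup or load-balancing variant depending on the bond mode.
 */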
static void bond_arp_monitor(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    arp_work.work);

	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
		bond_activebackup_arp_mon(bond);
	else
		bond_loadbalance_arp_mon(bond);
}

/*-------------------------- netdev event handling --------------------------*/

3058
/* Change device name */
static int bond_event_changename(struct bonding *bond)
{
	bond_remove_proc_entry(bond);
	bond_create_proc_entry(bond);
3063

3064 3065
	bond_debug_reregister(bond);

	return NOTIFY_DONE;
}

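/* Handle netdev notifier events addressed to the bond (master) device
 * itself: rename, register/unregister of the procfs entry and peer
 * notification bookkeeping.
 */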
static int bond_master_netdev_event(unsigned long event,
				    struct net_device *bond_dev)
{
3072
	struct bonding *event_bond = netdev_priv(bond_dev);

	switch (event) {
	case NETDEV_CHANGENAME:
		return bond_event_changename(event_bond);
3077 3078 3079 3080 3081 3082
	case NETDEV_UNREGISTER:
		bond_remove_proc_entry(event_bond);
		break;
	case NETDEV_REGISTER:
		bond_create_proc_entry(event_bond);
		break;
3083 3084 3085 3086
	case NETDEV_NOTIFY_PEERS:
		if (event_bond->send_peer_notif)
			event_bond->send_peer_notif--;
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

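/* Handle netdev notifier events for devices enslaved to a bond:
 * unregister releases the slave, link/speed changes feed the mode-specific
 * logic and the transmit slave array, and name changes may reselect the
 * primary and active slave.
 */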
static int bond_slave_netdev_event(unsigned long event,
				   struct net_device *slave_dev)
{
3097
	struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary;
3098 3099
	struct bonding *bond;
	struct net_device *bond_dev;

3101 3102 3103 3104 3105 3106 3107 3108
	/* A netdev event can be generated while enslaving a device
	 * before netdev_rx_handler_register is called in which case
	 * slave will be NULL
	 */
	if (!slave)
		return NOTIFY_DONE;
	bond_dev = slave->bond->dev;
	bond = slave->bond;
3109
	primary = rtnl_dereference(bond->primary_slave);
3110

	switch (event) {
	case NETDEV_UNREGISTER:
3113
		if (bond_dev->type != ARPHRD_ETHER)
3114 3115
			bond_release_and_destroy(bond_dev, slave_dev);
		else
3116
			__bond_release_one(bond_dev, slave_dev, false, true);
		break;
3118
	case NETDEV_UP:
	case NETDEV_CHANGE:
3120 3121 3122 3123 3124 3125 3126 3127 3128 3129
		/* For 802.3ad mode only:
		 * Getting invalid Speed/Duplex values here will put slave
		 * in weird state. So mark it as link-down for the time
		 * being and let link-monitoring (miimon) set it right when
		 * correct speeds/duplex are available.
		 */
		if (bond_update_speed_duplex(slave) &&
		    BOND_MODE(bond) == BOND_MODE_8023AD)
			slave->link = BOND_LINK_DOWN;

3130 3131
		if (BOND_MODE(bond) == BOND_MODE_8023AD)
			bond_3ad_adapter_speed_duplex_changed(slave);
		/* Fallthrough */
	case NETDEV_DOWN:
		/* Refresh slave-array if applicable!
		 * If the setup does not use miimon or arpmon (mode-specific!),
		 * then these events will not cause the slave-array to be
		 * refreshed. This will cause xmit to use a slave that is not
		 * usable. Avoid such a situation by refreshing the array at these
		 * events. If these (miimon/arpmon) parameters are configured
		 * then array gets refreshed twice and that should be fine!
		 */
3142
		if (bond_mode_can_use_xmit_hash(bond))
3143
			bond_update_slave_arr(bond, NULL);
		break;
	case NETDEV_CHANGEMTU:
3146
		/* TODO: Should slaves be allowed to
		 * independently alter their MTU?  For
		 * an active-backup bond, slaves need
		 * not be the same type of device, so
		 * MTUs may vary.  For other modes,
		 * slaves arguably should have the
		 * same MTUs. To do this, we'd need to
		 * take over the slave's change_mtu
		 * function for the duration of their
		 * servitude.
		 */
		break;
	case NETDEV_CHANGENAME:
3159
		/* we don't care if we don't have primary set */
3160
		if (!bond_uses_primary(bond) ||
3161 3162 3163
		    !bond->params.primary[0])
			break;

3164
		if (slave == primary) {
3165
			/* slave's name changed - he's no longer primary */
3166
			RCU_INIT_POINTER(bond->primary_slave, NULL);
3167 3168
		} else if (!strcmp(slave_dev->name, bond->params.primary)) {
			/* we have a new primary slave */
3169
			rcu_assign_pointer(bond->primary_slave, slave);
3170 3171 3172 3173
		} else { /* we didn't change primary - exit */
			break;
		}

3174
		netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n",
3175
			    primary ? slave_dev->name : "none");
3176 3177

		block_netpoll_tx();
3178
		bond_select_active_slave(bond);
3179
		unblock_netpoll_tx();
L
Linus Torvalds 已提交
3180
		break;
3181 3182 3183
	case NETDEV_FEAT_CHANGE:
		bond_compute_features(bond);
		break;
3184 3185 3186 3187
	case NETDEV_RESEND_IGMP:
		/* Propagate to master device */
		call_netdevice_notifiers(event, slave->bond->dev);
		break;
L
Linus Torvalds 已提交
3188 3189 3190 3191 3192 3193 3194
	default:
		break;
	}

	return NOTIFY_DONE;
}

/* bond_netdev_event: handle netdev notifier chain events.
 *
 * This function receives events for the netdev chain.  The caller (an
 * ioctl handler calling blocking_notifier_call_chain) holds the necessary
 * locks for us to safely manipulate the slave devices (RTNL lock,
 * dev_probe_lock).
 */
static int bond_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	netdev_dbg(event_dev, "event: %lx\n", event);

	if (!(event_dev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	if (event_dev->flags & IFF_MASTER) {
		netdev_dbg(event_dev, "IFF_MASTER\n");
		return bond_master_netdev_event(event, event_dev);
	}

	if (event_dev->flags & IFF_SLAVE) {
		netdev_dbg(event_dev, "IFF_SLAVE\n");
		return bond_slave_netdev_event(event, event_dev);
	}

	return NOTIFY_DONE;
}

static struct notifier_block bond_netdev_notifier = {
	.notifier_call = bond_netdev_event,
};

/*---------------------------- Hashing Policies -----------------------------*/

/* L2 hash helper */
static inline u32 bond_eth_hash(struct sk_buff *skb)
{
	struct ethhdr *ep, hdr_tmp;

	ep = skb_header_pointer(skb, 0, sizeof(hdr_tmp), &hdr_tmp);
	if (ep)
		return ep->h_dest[5] ^ ep->h_source[5] ^ ep->h_proto;
	return 0;
}
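/* Illustrative example (not part of the driver itself): the layer2 policy
 * above mixes only the last octet of the destination and source MAC
 * addresses with the EtherType, e.g. last octets 0xb2 and 0xa1 give
 * 0xb2 ^ 0xa1 = 0x13, further XORed with h_proto (kept in network byte
 * order).  Every frame exchanged between one pair of hosts therefore hashes
 * identically and stays on a single slave.
 */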

/* Extract the appropriate headers based on bond's xmit policy */
static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
			      struct flow_keys *fk)
{
	const struct ipv6hdr *iph6;
	const struct iphdr *iph;
	int noff, proto = -1;

	if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
		return skb_flow_dissect_flow_keys(skb, fk, 0);

	fk->ports.ports = 0;
	noff = skb_network_offset(skb);
	if (skb->protocol == htons(ETH_P_IP)) {
		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
			return false;
		iph = ip_hdr(skb);
		iph_to_flow_copy_v4addrs(fk, iph);
		noff += iph->ihl << 2;
		if (!ip_is_fragment(iph))
			proto = iph->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
			return false;
		iph6 = ipv6_hdr(skb);
		iph_to_flow_copy_v6addrs(fk, iph6);
		noff += sizeof(*iph6);
		proto = iph6->nexthdr;
	} else {
		return false;
	}
	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
		fk->ports.ports = skb_flow_get_ports(skb, noff, proto);

	return true;
}
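/* Summary of the dissect above (added note, not from the upstream comment):
 * layer2+3 and layer3+4 take the fast path that copies the IPv4/IPv6
 * addresses into @fk, and layer3+4 additionally fetches the L4 ports -
 * except for IPv4 fragments, so all fragments of one datagram follow the
 * same slave.  The encap2+3 and encap3+4 policies fall back to
 * skb_flow_dissect_flow_keys(), which can also parse tunnel/inner headers.
 */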

/**
 * bond_xmit_hash - generate a hash value based on the xmit policy
 * @bond: bonding device
 * @skb: buffer to use for headers
 *
 * This function will extract the necessary headers from the skb buffer and use
 * them to generate a hash based on the xmit_policy set in the bonding device
 */
u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
{
	struct flow_keys flow;
	u32 hash;

	if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 &&
	    skb->l4_hash)
		return skb->hash;

	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
	    !bond_flow_dissect(bond, skb, &flow))
		return bond_eth_hash(skb);

	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
	    bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
		hash = bond_eth_hash(skb);
	else
		hash = (__force u32)flow.ports.ports;
	hash ^= (__force u32)flow_get_u32_dst(&flow) ^
		(__force u32)flow_get_u32_src(&flow);
	hash ^= (hash >> 16);
	hash ^= (hash >> 8);

	return hash >> 1;
}
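/* Illustration of how the hash is consumed (added note, the flow below is
 * only an example): array-based xmit paths such as bond_3ad_xor_xmit() pick
 * the slave as
 *
 *	slave = slaves->arr[bond_xmit_hash(bond, skb) % slaves->count];
 *
 * so with layer3+4 a given TCP flow, say 10.0.0.1:40000 -> 10.0.0.2:80,
 * always lands on the same slave while distinct flows spread across the
 * array.
 */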

/*-------------------------- Device entry points ----------------------------*/

3315
void bond_work_init_all(struct bonding *bond)
3316 3317 3318 3319 3320
{
	INIT_DELAYED_WORK(&bond->mcast_work,
			  bond_resend_igmp_join_requests_delayed);
	INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
	INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
3321
	INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);
3322
	INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
3323
	INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
3324 3325 3326 3327 3328 3329 3330 3331 3332
}

static void bond_work_cancel_all(struct bonding *bond)
{
	cancel_delayed_work_sync(&bond->mii_work);
	cancel_delayed_work_sync(&bond->arp_work);
	cancel_delayed_work_sync(&bond->alb_work);
	cancel_delayed_work_sync(&bond->ad_work);
	cancel_delayed_work_sync(&bond->mcast_work);
3333
	cancel_delayed_work_sync(&bond->slave_arr_work);
3334 3335
}

L
Linus Torvalds 已提交
3336 3337
static int bond_open(struct net_device *bond_dev)
{
3338
	struct bonding *bond = netdev_priv(bond_dev);
3339
	struct list_head *iter;
3340
	struct slave *slave;
L
Linus Torvalds 已提交
3341

3342
	/* reset slave->backup and slave->inactive */
3343
	if (bond_has_slaves(bond)) {
3344
		bond_for_each_slave(bond, slave, iter) {
3345 3346
			if (bond_uses_primary(bond) &&
			    slave != rcu_access_pointer(bond->curr_active_slave)) {
3347 3348
				bond_set_slave_inactive_flags(slave,
							      BOND_SLAVE_NOTIFY_NOW);
3349
			} else if (BOND_MODE(bond) != BOND_MODE_8023AD) {
3350 3351
				bond_set_slave_active_flags(slave,
							    BOND_SLAVE_NOTIFY_NOW);
3352 3353 3354 3355
			}
		}
	}

3356
	if (bond_is_lb(bond)) {
L
Linus Torvalds 已提交
3357 3358 3359
		/* bond_alb_initialize must be called before the timer
		 * is started.
		 */
3360
		if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
3361
			return -ENOMEM;
3362
		if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB)
3363
			queue_delayed_work(bond->wq, &bond->alb_work, 0);
L
Linus Torvalds 已提交
3364 3365
	}

3366
	if (bond->params.miimon)  /* link check interval, in milliseconds. */
3367
		queue_delayed_work(bond->wq, &bond->mii_work, 0);
L
Linus Torvalds 已提交
3368 3369

	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
3370
		queue_delayed_work(bond->wq, &bond->arp_work, 0);
3371
		bond->recv_probe = bond_arp_rcv;
L
Linus Torvalds 已提交
3372 3373
	}

3374
	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
3375
		queue_delayed_work(bond->wq, &bond->ad_work, 0);
L
Linus Torvalds 已提交
3376
		/* register to receive LACPDUs */
3377
		bond->recv_probe = bond_3ad_lacpdu_recv;
3378
		bond_3ad_initiate_agg_selection(bond, 1);
L
Linus Torvalds 已提交
3379 3380
	}

3381
	if (bond_mode_can_use_xmit_hash(bond))
3382 3383
		bond_update_slave_arr(bond, NULL);

L
Linus Torvalds 已提交
3384 3385 3386 3387 3388
	return 0;
}

static int bond_close(struct net_device *bond_dev)
{
3389
	struct bonding *bond = netdev_priv(bond_dev);
L
Linus Torvalds 已提交
3390

3391
	bond_work_cancel_all(bond);
3392
	bond->send_peer_notif = 0;
3393
	if (bond_is_lb(bond))
L
Linus Torvalds 已提交
3394
		bond_alb_deinitialize(bond);
3395
	bond->recv_probe = NULL;
L
Linus Torvalds 已提交
3396 3397 3398 3399

	return 0;
}

/* fold stats, assuming all rtnl_link_stats64 fields are u64, but
 * that some drivers can provide 32bit values only.
 */
static void bond_fold_stats(struct rtnl_link_stats64 *_res,
			    const struct rtnl_link_stats64 *_new,
			    const struct rtnl_link_stats64 *_old)
{
	const u64 *new = (const u64 *)_new;
	const u64 *old = (const u64 *)_old;
	u64 *res = (u64 *)_res;
	int i;

	for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
		u64 nv = new[i];
		u64 ov = old[i];
		s64 delta = nv - ov;

		/* detects if this particular field is 32bit only */
		if (((nv | ov) >> 32) == 0)
			delta = (s64)(s32)((u32)nv - (u32)ov);

		/* filter anomalies, some drivers reset their stats
		 * at down/up events.
		 */
		if (delta > 0)
			res[i] += delta;
	}
}
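/* Worked example for the 32-bit detection above (added note): if a slave
 * driver keeps 32-bit counters and one wraps from old = 0xfffffff0 to
 * new = 0x00000010, both values have their upper 32 bits clear, so delta is
 * recomputed as (s64)(s32)((u32)0x10 - (u32)0xfffffff0) = 32 instead of the
 * large negative 64-bit difference that the "delta > 0" filter would have
 * discarded.
 */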

static int bond_get_nest_level(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	return bond->nest_level;
}

3436 3437
static void bond_get_stats(struct net_device *bond_dev,
			   struct rtnl_link_stats64 *stats)
L
Linus Torvalds 已提交
3438
{
3439
	struct bonding *bond = netdev_priv(bond_dev);
3440
	struct rtnl_link_stats64 temp;
3441
	struct list_head *iter;
L
Linus Torvalds 已提交
3442 3443
	struct slave *slave;

3444
	spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev));
3445
	memcpy(stats, &bond->bond_stats, sizeof(*stats));
L
Linus Torvalds 已提交
3446

E
Eric Dumazet 已提交
3447 3448 3449
	rcu_read_lock();
	bond_for_each_slave_rcu(bond, slave, iter) {
		const struct rtnl_link_stats64 *new =
3450
			dev_get_stats(slave->dev, &temp);
E
Eric Dumazet 已提交
3451 3452

		bond_fold_stats(stats, new, &slave->slave_stats);
3453 3454

		/* save off the slave stats for the next run */
E
Eric Dumazet 已提交
3455
		memcpy(&slave->slave_stats, new, sizeof(*new));
3456
	}
E
Eric Dumazet 已提交
3457 3458
	rcu_read_unlock();

3459
	memcpy(&bond->bond_stats, stats, sizeof(*stats));
E
Eric Dumazet 已提交
3460
	spin_unlock(&bond->stats_lock);
L
Linus Torvalds 已提交
3461 3462 3463 3464
}

static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
{
3465
	struct bonding *bond = netdev_priv(bond_dev);
L
Linus Torvalds 已提交
3466 3467 3468 3469 3470 3471
	struct net_device *slave_dev = NULL;
	struct ifbond k_binfo;
	struct ifbond __user *u_binfo = NULL;
	struct ifslave k_sinfo;
	struct ifslave __user *u_sinfo = NULL;
	struct mii_ioctl_data *mii = NULL;
3472
	struct bond_opt_value newval;
3473
	struct net *net;
L
Linus Torvalds 已提交
3474 3475
	int res = 0;

3476
	netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);
L
Linus Torvalds 已提交
3477 3478 3479 3480

	switch (cmd) {
	case SIOCGMIIPHY:
		mii = if_mii(ifr);
S
Stephen Hemminger 已提交
3481
		if (!mii)
L
Linus Torvalds 已提交
3482
			return -EINVAL;
S
Stephen Hemminger 已提交
3483

L
Linus Torvalds 已提交
3484 3485 3486
		mii->phy_id = 0;
		/* Fall Through */
	case SIOCGMIIREG:
3487
		/* We do this again just in case we were called by SIOCGMIIREG
L
Linus Torvalds 已提交
3488 3489 3490
		 * instead of SIOCGMIIPHY.
		 */
		mii = if_mii(ifr);
S
Stephen Hemminger 已提交
3491
		if (!mii)
L
Linus Torvalds 已提交
3492
			return -EINVAL;
S
Stephen Hemminger 已提交
3493

L
Linus Torvalds 已提交
3494 3495
		if (mii->reg_num == 1) {
			mii->val_out = 0;
S
Stephen Hemminger 已提交
3496
			if (netif_carrier_ok(bond->dev))
L
Linus Torvalds 已提交
3497 3498 3499 3500 3501 3502 3503 3504
				mii->val_out = BMSR_LSTATUS;
		}

		return 0;
	case BOND_INFO_QUERY_OLD:
	case SIOCBONDINFOQUERY:
		u_binfo = (struct ifbond __user *)ifr->ifr_data;

S
Stephen Hemminger 已提交
3505
		if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
L
Linus Torvalds 已提交
3506 3507
			return -EFAULT;

3508 3509
		bond_info_query(bond_dev, &k_binfo);
		if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
S
Stephen Hemminger 已提交
3510
			return -EFAULT;
L
Linus Torvalds 已提交
3511

3512
		return 0;
L
Linus Torvalds 已提交
3513 3514 3515 3516
	case BOND_SLAVE_INFO_QUERY_OLD:
	case SIOCBONDSLAVEINFOQUERY:
		u_sinfo = (struct ifslave __user *)ifr->ifr_data;

S
Stephen Hemminger 已提交
3517
		if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave)))
L
Linus Torvalds 已提交
3518 3519 3520
			return -EFAULT;

		res = bond_slave_info_query(bond_dev, &k_sinfo);
S
Stephen Hemminger 已提交
3521 3522 3523
		if (res == 0 &&
		    copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave)))
			return -EFAULT;
L
Linus Torvalds 已提交
3524 3525 3526 3527 3528 3529

		return res;
	default:
		break;
	}

3530 3531 3532
	net = dev_net(bond_dev);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
L
Linus Torvalds 已提交
3533 3534
		return -EPERM;

3535
	slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
L
Linus Torvalds 已提交
3536

3537
	netdev_dbg(bond_dev, "slave_dev=%p:\n", slave_dev);
L
Linus Torvalds 已提交
3538

S
Stephen Hemminger 已提交
3539
	if (!slave_dev)
3540
		return -ENODEV;
L
Linus Torvalds 已提交
3541

3542
	netdev_dbg(bond_dev, "slave_dev->name=%s:\n", slave_dev->name);
3543 3544 3545
	switch (cmd) {
	case BOND_ENSLAVE_OLD:
	case SIOCBONDENSLAVE:
D
David Ahern 已提交
3546
		res = bond_enslave(bond_dev, slave_dev, NULL);
3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558
		break;
	case BOND_RELEASE_OLD:
	case SIOCBONDRELEASE:
		res = bond_release(bond_dev, slave_dev);
		break;
	case BOND_SETHWADDR_OLD:
	case SIOCBONDSETHWADDR:
		bond_set_dev_addr(bond_dev, slave_dev);
		res = 0;
		break;
	case BOND_CHANGE_ACTIVE_OLD:
	case SIOCBONDCHANGEACTIVE:
3559
		bond_opt_initstr(&newval, slave_dev->name);
3560 3561
		res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE,
					    &newval);
3562 3563 3564
		break;
	default:
		res = -EOPNOTSUPP;
L
Linus Torvalds 已提交
3565 3566 3567 3568 3569
	}

	return res;
}

3570
static void bond_change_rx_flags(struct net_device *bond_dev, int change)
L
Linus Torvalds 已提交
3571
{
3572
	struct bonding *bond = netdev_priv(bond_dev);
L
Linus Torvalds 已提交
3573

3574 3575 3576
	if (change & IFF_PROMISC)
		bond_set_promiscuity(bond,
				     bond_dev->flags & IFF_PROMISC ? 1 : -1);
S
Stephen Hemminger 已提交
3577

3578 3579 3580 3581
	if (change & IFF_ALLMULTI)
		bond_set_allmulti(bond,
				  bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
}
L
Linus Torvalds 已提交
3582

3583
static void bond_set_rx_mode(struct net_device *bond_dev)
3584 3585
{
	struct bonding *bond = netdev_priv(bond_dev);
3586
	struct list_head *iter;
3587
	struct slave *slave;
L
Linus Torvalds 已提交
3588

3589
	rcu_read_lock();
3590
	if (bond_uses_primary(bond)) {
3591
		slave = rcu_dereference(bond->curr_active_slave);
3592 3593 3594 3595 3596
		if (slave) {
			dev_uc_sync(slave->dev, bond_dev);
			dev_mc_sync(slave->dev, bond_dev);
		}
	} else {
3597
		bond_for_each_slave_rcu(bond, slave, iter) {
3598 3599 3600
			dev_uc_sync_multiple(slave->dev, bond_dev);
			dev_mc_sync_multiple(slave->dev, bond_dev);
		}
L
Linus Torvalds 已提交
3601
	}
3602
	rcu_read_unlock();
L
Linus Torvalds 已提交
3603 3604
}

3605
static int bond_neigh_init(struct neighbour *n)
3606
{
3607 3608 3609
	struct bonding *bond = netdev_priv(n->dev);
	const struct net_device_ops *slave_ops;
	struct neigh_parms parms;
3610
	struct slave *slave;
3611 3612
	int ret;

3613
	slave = bond_first_slave(bond);
3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625
	if (!slave)
		return 0;
	slave_ops = slave->dev->netdev_ops;
	if (!slave_ops->ndo_neigh_setup)
		return 0;

	parms.neigh_setup = NULL;
	parms.neigh_cleanup = NULL;
	ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
	if (ret)
		return ret;

3626
	/* Assign slave's neigh_cleanup to neighbour in case cleanup is called
3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638
	 * after the last slave has been detached.  Assumes that all slaves
	 * utilize the same neigh_cleanup (true at this writing as only user
	 * is ipoib).
	 */
	n->parms->neigh_cleanup = parms.neigh_cleanup;

	if (!parms.neigh_setup)
		return 0;

	return parms.neigh_setup(n);
}

/* The bonding ndo_neigh_setup is called at init time before any
 * slave exists. So we must declare a proxy setup function which will
 * be used at run time to resolve the actual slave neigh param setup.
 *
 * It's also called by master devices (such as vlans) to setup their
 * underlying devices. In that case - do nothing, we're already set up from
 * our init.
 */
static int bond_neigh_setup(struct net_device *dev,
			    struct neigh_parms *parms)
{
	/* modify only our neigh_parms */
	if (parms->dev == dev)
		parms->neigh_setup = bond_neigh_init;

	return 0;
}
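/* Call-flow note (added, hedged summary): the core allocates neigh_parms for
 * the bond and invokes this ndo_neigh_setup once, which merely plants
 * bond_neigh_init as the per-neighbour hook.  When a neighbour entry is later
 * created on the bond, bond_neigh_init() consults whatever slave is enslaved
 * at that moment and defers to that slave's own ndo_neigh_setup (ipoib being
 * the known user), so the slave present at runtime decides the parameters.
 */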

/* Change the MTU of all of a master's slaves to match the master */
L
Linus Torvalds 已提交
3658 3659
static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
{
3660
	struct bonding *bond = netdev_priv(bond_dev);
3661
	struct slave *slave, *rollback_slave;
3662
	struct list_head *iter;
L
Linus Torvalds 已提交
3663 3664
	int res = 0;

3665
	netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu);
L
Linus Torvalds 已提交
3666

3667
	bond_for_each_slave(bond, slave, iter) {
3668 3669
		netdev_dbg(bond_dev, "s %p c_m %p\n",
			   slave, slave->dev->netdev_ops->ndo_change_mtu);
3670

L
Linus Torvalds 已提交
3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681
		res = dev_set_mtu(slave->dev, new_mtu);

		if (res) {
			/* If we failed to set the slave's mtu to the new value
			 * we must abort the operation even in ACTIVE_BACKUP
			 * mode, because if we allow the backup slaves to have
			 * different mtu values than the active slave we'll
			 * need to change their mtu when doing a failover. That
			 * means changing their mtu from timer context, which
			 * is probably not a good idea.
			 */
3682 3683
			netdev_dbg(bond_dev, "err %d %s\n", res,
				   slave->dev->name);
L
Linus Torvalds 已提交
3684 3685 3686 3687 3688 3689 3690 3691 3692 3693
			goto unwind;
		}
	}

	bond_dev->mtu = new_mtu;

	return 0;

unwind:
	/* unwind from head to the slave that failed */
3694
	bond_for_each_slave(bond, rollback_slave, iter) {
L
Linus Torvalds 已提交
3695 3696
		int tmp_res;

3697 3698 3699 3700
		if (rollback_slave == slave)
			break;

		tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
L
Linus Torvalds 已提交
3701
		if (tmp_res) {
3702 3703
			netdev_dbg(bond_dev, "unwind err %d dev %s\n",
				   tmp_res, rollback_slave->dev->name);
L
Linus Torvalds 已提交
3704 3705 3706 3707 3708 3709
		}
	}

	return res;
}

3710
/* Change HW address
L
Linus Torvalds 已提交
3711 3712 3713 3714 3715 3716 3717
 *
 * Note that many devices must be down to change the HW address, and
 * downing the master releases all slaves.  We can make bonds full of
 * bonding devices to test this, however.
 */
static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
{
3718
	struct bonding *bond = netdev_priv(bond_dev);
3719
	struct slave *slave, *rollback_slave;
3720
	struct sockaddr_storage *ss = addr, tmp_ss;
3721
	struct list_head *iter;
L
Linus Torvalds 已提交
3722 3723
	int res = 0;

3724
	if (BOND_MODE(bond) == BOND_MODE_ALB)
3725 3726 3727
		return bond_alb_set_mac_address(bond_dev, addr);


3728
	netdev_dbg(bond_dev, "bond=%p\n", bond);
L
Linus Torvalds 已提交
3729

3730 3731
	/* If fail_over_mac is enabled, do nothing and return success.
	 * Returning an error causes ifenslave to fail.
3732
	 */
3733
	if (bond->params.fail_over_mac &&
3734
	    BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
3735
		return 0;
3736

3737
	if (!is_valid_ether_addr(ss->__data))
L
Linus Torvalds 已提交
3738 3739
		return -EADDRNOTAVAIL;

3740
	bond_for_each_slave(bond, slave, iter) {
3741
		netdev_dbg(bond_dev, "slave %p %s\n", slave, slave->dev->name);
L
Linus Torvalds 已提交
3742 3743 3744 3745 3746 3747 3748 3749
		res = dev_set_mac_address(slave->dev, addr);
		if (res) {
			/* TODO: consider downing the slave
			 * and retry ?
			 * User should expect communications
			 * breakage anyway until ARP finish
			 * updating, so...
			 */
3750
			netdev_dbg(bond_dev, "err %d %s\n", res, slave->dev->name);
L
Linus Torvalds 已提交
3751 3752 3753 3754 3755
			goto unwind;
		}
	}

	/* success */
3756
	memcpy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);
L
Linus Torvalds 已提交
3757 3758 3759
	return 0;

unwind:
3760 3761
	memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
	tmp_ss.ss_family = bond_dev->type;
L
Linus Torvalds 已提交
3762 3763

	/* unwind from head to the slave that failed */
3764
	bond_for_each_slave(bond, rollback_slave, iter) {
L
Linus Torvalds 已提交
3765 3766
		int tmp_res;

3767 3768 3769
		if (rollback_slave == slave)
			break;

3770 3771
		tmp_res = dev_set_mac_address(rollback_slave->dev,
					      (struct sockaddr *)&tmp_ss);
L
Linus Torvalds 已提交
3772
		if (tmp_res) {
3773 3774
			netdev_dbg(bond_dev, "unwind err %d dev %s\n",
				   tmp_res, rollback_slave->dev->name);
L
Linus Torvalds 已提交
3775 3776 3777 3778 3779 3780
		}
	}

	return res;
}

3781 3782 3783 3784 3785 3786 3787 3788 3789 3790
/**
 * bond_xmit_slave_id - transmit skb through slave with slave_id
 * @bond: bonding device that is transmitting
 * @skb: buffer to transmit
 * @slave_id: slave id up to slave_cnt-1 through which to transmit
 *
 * This function tries to transmit through slave with slave_id but in case
 * it fails, it tries to find the first available slave for transmission.
 * The skb is consumed in all cases, thus the function is void.
 */
3791
static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
3792
{
3793
	struct list_head *iter;
3794 3795 3796 3797
	struct slave *slave;
	int i = slave_id;

	/* Here we start from the slave with slave_id */
3798
	bond_for_each_slave_rcu(bond, slave, iter) {
3799
		if (--i < 0) {
3800
			if (bond_slave_can_tx(slave)) {
3801 3802 3803 3804 3805 3806 3807 3808
				bond_dev_queue_xmit(bond, skb, slave->dev);
				return;
			}
		}
	}

	/* Here we start from the first slave up to slave_id */
	i = slave_id;
3809
	bond_for_each_slave_rcu(bond, slave, iter) {
3810 3811
		if (--i < 0)
			break;
3812
		if (bond_slave_can_tx(slave)) {
3813 3814 3815 3816 3817
			bond_dev_queue_xmit(bond, skb, slave->dev);
			return;
		}
	}
	/* no slave that can tx has been found */
E
Eric Dumazet 已提交
3818
	bond_tx_drop(bond->dev, skb);
3819 3820
}

/**
 * bond_rr_gen_slave_id - generate slave id based on packets_per_slave
 * @bond: bonding device to use
 *
 * Based on the value of the bonding device's packets_per_slave parameter
 * this function generates a slave id, which is usually used as the next
 * slave to transmit through.
 */
static u32 bond_rr_gen_slave_id(struct bonding *bond)
{
	u32 slave_id;
	struct reciprocal_value reciprocal_packets_per_slave;
	int packets_per_slave = bond->params.packets_per_slave;

	switch (packets_per_slave) {
	case 0:
		slave_id = prandom_u32();
		break;
	case 1:
		slave_id = bond->rr_tx_counter;
		break;
	default:
		reciprocal_packets_per_slave =
			bond->params.reciprocal_packets_per_slave;
		slave_id = reciprocal_divide(bond->rr_tx_counter,
					     reciprocal_packets_per_slave);
		break;
	}
	bond->rr_tx_counter++;

	return slave_id;
}
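/* Illustration (added note): with packets_per_slave = 3 the precomputed
 * reciprocal_value turns the division into a multiply-and-shift, so
 * successive rr_tx_counter values map to ids 0,0,0,1,1,1,2,... - three
 * packets leave on one interface before round-robin advances (the caller
 * still reduces the id modulo the current slave count).  A value of 1
 * rotates every packet, and 0 picks a pseudo-random slave per packet.
 */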

static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
					struct net_device *bond_dev)
L
Linus Torvalds 已提交
3856
{
3857
	struct bonding *bond = netdev_priv(bond_dev);
3858
	struct iphdr *iph = ip_hdr(skb);
3859
	struct slave *slave;
3860
	u32 slave_id;
L
Linus Torvalds 已提交
3861

3862
	/* Start with the curr_active_slave that joined the bond as the
3863 3864 3865 3866
	 * default for sending IGMP traffic.  For failover purposes one
	 * needs to maintain some consistency for the interface that will
	 * send the join/membership reports.  The curr_active_slave found
	 * will send all of this type of traffic.
3867
	 */
3868
	if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
3869
		slave = rcu_dereference(bond->curr_active_slave);
3870
		if (slave)
3871 3872 3873
			bond_dev_queue_xmit(bond, skb, slave->dev);
		else
			bond_xmit_slave_id(bond, skb, 0);
3874
	} else {
3875
		int slave_cnt = READ_ONCE(bond->slave_cnt);
3876 3877 3878 3879 3880

		if (likely(slave_cnt)) {
			slave_id = bond_rr_gen_slave_id(bond);
			bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
		} else {
E
Eric Dumazet 已提交
3881
			bond_tx_drop(bond_dev, skb);
3882
		}
L
Linus Torvalds 已提交
3883
	}
3884

3885
	return NETDEV_TX_OK;
L
Linus Torvalds 已提交
3886 3887
}

3888
/* In active-backup mode, we know that bond->curr_active_slave is always valid if
L
Linus Torvalds 已提交
3889 3890
 * the bond has a usable interface.
 */
3891 3892
static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
					  struct net_device *bond_dev)
L
Linus Torvalds 已提交
3893
{
3894
	struct bonding *bond = netdev_priv(bond_dev);
3895
	struct slave *slave;
L
Linus Torvalds 已提交
3896

3897
	slave = rcu_dereference(bond->curr_active_slave);
3898
	if (slave)
3899 3900
		bond_dev_queue_xmit(bond, skb, slave->dev);
	else
E
Eric Dumazet 已提交
3901
		bond_tx_drop(bond_dev, skb);
3902

3903
	return NETDEV_TX_OK;
L
Linus Torvalds 已提交
3904 3905
}

3906 3907 3908
/* Use this to update slave_array when (a) it's not appropriate to update
 * slave_array right away (note that update_slave_array() may sleep)
 * and / or (b) RTNL is not held.
L
Linus Torvalds 已提交
3909
 */
3910
void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay)
L
Linus Torvalds 已提交
3911
{
3912 3913
	queue_delayed_work(bond->wq, &bond->slave_arr_work, delay);
}
L
Linus Torvalds 已提交
3914

3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940
/* Slave array work handler. Holds only RTNL */
static void bond_slave_arr_handler(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    slave_arr_work.work);
	int ret;

	if (!rtnl_trylock())
		goto err;

	ret = bond_update_slave_arr(bond, NULL);
	rtnl_unlock();
	if (ret) {
		pr_warn_ratelimited("Failed to update slave array from WT\n");
		goto err;
	}
	return;

err:
	bond_slave_arr_work_rearm(bond, 1);
}

/* Build the usable slaves array in control path for modes that use xmit-hash
 * to determine the slave interface -
 * (a) BOND_MODE_8023AD
 * (b) BOND_MODE_XOR
3941
 * (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0
3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993
 *
 * The caller is expected to hold RTNL only and NO other lock!
 */
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
{
	struct slave *slave;
	struct list_head *iter;
	struct bond_up_slave *new_arr, *old_arr;
	int agg_id = 0;
	int ret = 0;

#ifdef CONFIG_LOCKDEP
	WARN_ON(lockdep_is_held(&bond->mode_lock));
#endif

	new_arr = kzalloc(offsetof(struct bond_up_slave, arr[bond->slave_cnt]),
			  GFP_KERNEL);
	if (!new_arr) {
		ret = -ENOMEM;
		pr_err("Failed to build slave-array.\n");
		goto out;
	}
	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		struct ad_info ad_info;

		if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
			pr_debug("bond_3ad_get_active_agg_info failed\n");
			kfree_rcu(new_arr, rcu);
			/* No active aggregator means it's not safe to use
			 * the previous array.
			 */
			old_arr = rtnl_dereference(bond->slave_arr);
			if (old_arr) {
				RCU_INIT_POINTER(bond->slave_arr, NULL);
				kfree_rcu(old_arr, rcu);
			}
			goto out;
		}
		agg_id = ad_info.aggregator_id;
	}
	bond_for_each_slave(bond, slave, iter) {
		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			struct aggregator *agg;

			agg = SLAVE_AD_INFO(slave)->port.aggregator;
			if (!agg || agg->aggregator_identifier != agg_id)
				continue;
		}
		if (!bond_slave_can_tx(slave))
			continue;
		if (skipslave == slave)
			continue;
3994 3995 3996 3997 3998

		netdev_dbg(bond->dev,
			   "Adding slave dev %s to tx hash array[%d]\n",
			   slave->dev->name, new_arr->count);

3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034
		new_arr->arr[new_arr->count++] = slave;
	}

	old_arr = rtnl_dereference(bond->slave_arr);
	rcu_assign_pointer(bond->slave_arr, new_arr);
	if (old_arr)
		kfree_rcu(old_arr, rcu);
out:
	if (ret != 0 && skipslave) {
		int idx;

		/* Rare situation where caller has asked to skip a specific
		 * slave but allocation failed (most likely!). BTW this is
		 * only possible when the call is initiated from
		 * __bond_release_one(). In this situation; overwrite the
		 * skipslave entry in the array with the last entry from the
		 * array to avoid a situation where the xmit path may choose
		 * this to-be-skipped slave to send a packet out.
		 */
		old_arr = rtnl_dereference(bond->slave_arr);
		for (idx = 0; idx < old_arr->count; idx++) {
			if (skipslave == old_arr->arr[idx]) {
				old_arr->arr[idx] =
				    old_arr->arr[old_arr->count-1];
				old_arr->count--;
				break;
			}
		}
	}
	return ret;
}
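/* Note on the publication scheme above (added, hedged summary): the new
 * array is installed with rcu_assign_pointer() and the old one released with
 * kfree_rcu(), so lockless readers in the xmit path always see either the
 * complete old array or the complete new one, never a half-built mix; the
 * only in-place change is the error-path fix-up above, which just overwrites
 * the entry of the slave being released and shrinks the count.
 */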

/* Use this Xmit function for 3AD as well as XOR modes. The current
 * usable slave array is formed in the control path. The xmit function
 * just calculates hash and sends the packet out.
 */
4035 4036
static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
				     struct net_device *dev)
4037 4038 4039 4040 4041 4042 4043
{
	struct bonding *bond = netdev_priv(dev);
	struct slave *slave;
	struct bond_up_slave *slaves;
	unsigned int count;

	slaves = rcu_dereference(bond->slave_arr);
4044
	count = slaves ? READ_ONCE(slaves->count) : 0;
4045 4046 4047 4048
	if (likely(count)) {
		slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
		bond_dev_queue_xmit(bond, skb, slave->dev);
	} else {
E
Eric Dumazet 已提交
4049
		bond_tx_drop(dev, skb);
4050
	}
4051

4052
	return NETDEV_TX_OK;
L
Linus Torvalds 已提交
4053 4054
}

4055
/* in broadcast mode, we send everything to all usable interfaces. */
4056 4057
static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
				       struct net_device *bond_dev)
L
Linus Torvalds 已提交
4058
{
4059
	struct bonding *bond = netdev_priv(bond_dev);
4060
	struct slave *slave = NULL;
4061
	struct list_head *iter;
L
Linus Torvalds 已提交
4062

4063
	bond_for_each_slave_rcu(bond, slave, iter) {
4064 4065
		if (bond_is_last_slave(bond, slave))
			break;
4066
		if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
4067
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
L
Linus Torvalds 已提交
4068

4069
			if (!skb2) {
4070 4071
				net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
						    bond_dev->name, __func__);
4072
				continue;
L
Linus Torvalds 已提交
4073
			}
4074
			bond_dev_queue_xmit(bond, skb2, slave->dev);
L
Linus Torvalds 已提交
4075 4076
		}
	}
4077
	if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
4078 4079
		bond_dev_queue_xmit(bond, skb, slave->dev);
	else
E
Eric Dumazet 已提交
4080
		bond_tx_drop(bond_dev, skb);
S
Stephen Hemminger 已提交
4081

4082
	return NETDEV_TX_OK;
L
Linus Torvalds 已提交
4083 4084 4085 4086
}

/*------------------------- Device initialization ---------------------------*/

4087
/* Lookup the slave that corresponds to a qid */
4088 4089 4090 4091
static inline int bond_slave_override(struct bonding *bond,
				      struct sk_buff *skb)
{
	struct slave *slave = NULL;
4092
	struct list_head *iter;
4093

4094
	if (!skb_rx_queue_recorded(skb))
4095
		return 1;
4096 4097

	/* Find out if any slaves have the same mapping as this skb. */
4098
	bond_for_each_slave_rcu(bond, slave, iter) {
4099
		if (slave->queue_id == skb_get_queue_mapping(skb)) {
4100 4101
			if (bond_slave_is_up(slave) &&
			    slave->link == BOND_LINK_UP) {
4102 4103 4104 4105
				bond_dev_queue_xmit(bond, skb, slave->dev);
				return 0;
			}
			/* If the slave isn't UP, use default transmit policy. */
4106 4107 4108 4109
			break;
		}
	}

4110
	return 1;
4111 4112
}

4113

static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev,
			     select_queue_fallback_t fallback)
{
	/* This helper function exists to help dev_pick_tx get the correct
	 * destination queue.  Using a helper function skips a call to
	 * skb_tx_hash and will put the skbs in the queue we expect on their
	 * way down to the bonding driver.
	 */
	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

	/* Save the original txq to restore before passing to the driver */
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb);

	if (unlikely(txq >= dev->real_num_tx_queues)) {
		do {
			txq -= dev->real_num_tx_queues;
		} while (txq >= dev->real_num_tx_queues);
	}
	return txq;
}
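/* Usage example (added note; device names are illustrative only): after
 *
 *	echo "eth1:3" > /sys/class/net/bond0/bonding/queue_id
 *
 * traffic that user space or tc steers to bond queue 3 keeps that mapping
 * here, and bond_slave_override() later sends it out on eth1 while that
 * slave is up; the original queue is stashed in qdisc_skb_cb so the slave's
 * own queue selection is not skewed by the bond's mapping.
 */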

static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
4137
{
4138 4139
	struct bonding *bond = netdev_priv(dev);

4140 4141 4142
	if (bond_should_override_tx_queue(bond) &&
	    !bond_slave_override(bond, skb))
		return NETDEV_TX_OK;
4143

4144
	switch (BOND_MODE(bond)) {
4145 4146 4147 4148
	case BOND_MODE_ROUNDROBIN:
		return bond_xmit_roundrobin(skb, dev);
	case BOND_MODE_ACTIVEBACKUP:
		return bond_xmit_activebackup(skb, dev);
4149
	case BOND_MODE_8023AD:
4150
	case BOND_MODE_XOR:
4151
		return bond_3ad_xor_xmit(skb, dev);
4152 4153 4154 4155
	case BOND_MODE_BROADCAST:
		return bond_xmit_broadcast(skb, dev);
	case BOND_MODE_ALB:
		return bond_alb_xmit(skb, dev);
4156 4157
	case BOND_MODE_TLB:
		return bond_tlb_xmit(skb, dev);
4158 4159
	default:
		/* Should never happen, mode already checked */
4160
		netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
4161
		WARN_ON_ONCE(1);
E
Eric Dumazet 已提交
4162
		bond_tx_drop(dev, skb);
4163 4164 4165 4166
		return NETDEV_TX_OK;
	}
}

4167 4168 4169 4170 4171
static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bonding *bond = netdev_priv(dev);
	netdev_tx_t ret = NETDEV_TX_OK;

4172
	/* If we risk deadlock from transmitting this in the
4173 4174
	 * netpoll path, tell netpoll to queue the frame for later tx
	 */
4175
	if (unlikely(is_netpoll_tx_blocked(dev)))
4176 4177
		return NETDEV_TX_BUSY;

4178
	rcu_read_lock();
4179
	if (bond_has_slaves(bond))
4180 4181
		ret = __bond_start_xmit(skb, dev);
	else
E
Eric Dumazet 已提交
4182
		bond_tx_drop(dev, skb);
4183
	rcu_read_unlock();
4184 4185 4186

	return ret;
}
4187

static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
					   struct ethtool_link_ksettings *cmd)
{
	struct bonding *bond = netdev_priv(bond_dev);
	unsigned long speed = 0;
	struct list_head *iter;
	struct slave *slave;

	cmd->base.duplex = DUPLEX_UNKNOWN;
	cmd->base.port = PORT_OTHER;

	/* Since bond_slave_can_tx returns false for all inactive or down slaves, we
	 * do not need to check mode.  Though link speed might not represent
	 * the true receive or transmit bandwidth (not all modes are symmetric)
	 * this is an accurate maximum.
	 */
	bond_for_each_slave(bond, slave, iter) {
		if (bond_slave_can_tx(slave)) {
			if (slave->speed != SPEED_UNKNOWN)
				speed += slave->speed;
			if (cmd->base.duplex == DUPLEX_UNKNOWN &&
			    slave->duplex != DUPLEX_UNKNOWN)
				cmd->base.duplex = slave->duplex;
		}
	}
	cmd->base.speed = speed ? : SPEED_UNKNOWN;

	return 0;
}
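/* Example (added note): a bond with two transmit-capable 1000 Mb/s slaves and
 * one failed slave reports base.speed = 2000 and the duplex of the first
 * usable slave; with no slave able to transmit, both fields stay
 * SPEED_UNKNOWN / DUPLEX_UNKNOWN.  "ethtool bond0" reads these values back.
 */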

static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
4219
				     struct ethtool_drvinfo *drvinfo)
4220
{
4221 4222 4223 4224
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
		 BOND_ABI_VERSION);
4225 4226
}

4227
static const struct ethtool_ops bond_ethtool_ops = {
4228
	.get_drvinfo		= bond_ethtool_get_drvinfo,
4229
	.get_link		= ethtool_op_get_link,
4230
	.get_link_ksettings	= bond_ethtool_get_link_ksettings,
4231 4232
};

4233
static const struct net_device_ops bond_netdev_ops = {
4234
	.ndo_init		= bond_init,
S
Stephen Hemminger 已提交
4235
	.ndo_uninit		= bond_uninit,
4236 4237
	.ndo_open		= bond_open,
	.ndo_stop		= bond_close,
4238
	.ndo_start_xmit		= bond_start_xmit,
4239
	.ndo_select_queue	= bond_select_queue,
4240
	.ndo_get_stats64	= bond_get_stats,
4241
	.ndo_do_ioctl		= bond_do_ioctl,
4242
	.ndo_change_rx_flags	= bond_change_rx_flags,
4243
	.ndo_set_rx_mode	= bond_set_rx_mode,
4244
	.ndo_change_mtu		= bond_change_mtu,
J
Jiri Pirko 已提交
4245
	.ndo_set_mac_address	= bond_set_mac_address,
4246
	.ndo_neigh_setup	= bond_neigh_setup,
J
Jiri Pirko 已提交
4247
	.ndo_vlan_rx_add_vid	= bond_vlan_rx_add_vid,
4248
	.ndo_vlan_rx_kill_vid	= bond_vlan_rx_kill_vid,
4249
	.ndo_get_lock_subclass  = bond_get_nest_level,
4250
#ifdef CONFIG_NET_POLL_CONTROLLER
4251
	.ndo_netpoll_setup	= bond_netpoll_setup,
4252 4253 4254
	.ndo_netpoll_cleanup	= bond_netpoll_cleanup,
	.ndo_poll_controller	= bond_poll_controller,
#endif
J
Jiri Pirko 已提交
4255 4256
	.ndo_add_slave		= bond_enslave,
	.ndo_del_slave		= bond_release,
4257
	.ndo_fix_features	= bond_fix_features,
4258
	.ndo_features_check	= passthru_features_check,
4259 4260
};

4261 4262 4263 4264
static const struct device_type bond_type = {
	.name = "bond",
};

4265 4266 4267 4268 4269 4270 4271
static void bond_destructor(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	if (bond->wq)
		destroy_workqueue(bond->wq);
}

4272
void bond_setup(struct net_device *bond_dev)
L
Linus Torvalds 已提交
4273
{
4274
	struct bonding *bond = netdev_priv(bond_dev);
L
Linus Torvalds 已提交
4275

4276
	spin_lock_init(&bond->mode_lock);
E
Eric Dumazet 已提交
4277
	spin_lock_init(&bond->stats_lock);
4278
	bond->params = bonding_defaults;
L
Linus Torvalds 已提交
4279 4280 4281 4282 4283

	/* Initialize pointers */
	bond->dev = bond_dev;

	/* Initialize the device entry points */
4284
	ether_setup(bond_dev);
W
WANG Cong 已提交
4285
	bond_dev->max_mtu = ETH_MAX_MTU;
4286
	bond_dev->netdev_ops = &bond_netdev_ops;
4287
	bond_dev->ethtool_ops = &bond_ethtool_ops;
L
Linus Torvalds 已提交
4288

4289 4290
	bond_dev->needs_free_netdev = true;
	bond_dev->priv_destructor = bond_destructor;
L
Linus Torvalds 已提交
4291

4292 4293
	SET_NETDEV_DEVTYPE(bond_dev, &bond_type);

L
Linus Torvalds 已提交
4294
	/* Initialize the device options */
4295
	bond_dev->flags |= IFF_MASTER;
4296
	bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
4297
	bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
4298

4299
	/* don't acquire bond device's netif_tx_lock when transmitting */
L
Linus Torvalds 已提交
4300 4301 4302 4303 4304 4305 4306 4307 4308
	bond_dev->features |= NETIF_F_LLTX;

	/* By default, we declare the bond to be fully
	 * VLAN hardware accelerated capable. Special
	 * care is taken in the various xmit functions
	 * when there are slaves that are not hw accel
	 * capable
	 */

4309 4310 4311
	/* Don't allow bond devices to change network namespaces. */
	bond_dev->features |= NETIF_F_NETNS_LOCAL;

4312
	bond_dev->hw_features = BOND_VLAN_FEATURES |
4313 4314 4315
				NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX |
				NETIF_F_HW_VLAN_CTAG_FILTER;
4316

4317
	bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
4318
	bond_dev->features |= bond_dev->hw_features;
L
Linus Torvalds 已提交
4319 4320
}

4321 4322 4323
/* Destroy a bonding device.
 * Must be under rtnl_lock when this function is called.
 */
4324
static void bond_uninit(struct net_device *bond_dev)
J
Jay Vosburgh 已提交
4325
{
4326
	struct bonding *bond = netdev_priv(bond_dev);
4327 4328
	struct list_head *iter;
	struct slave *slave;
4329
	struct bond_up_slave *arr;
J
Jay Vosburgh 已提交
4330

4331 4332
	bond_netpoll_cleanup(bond_dev);

4333
	/* Release the bonded slaves */
4334
	bond_for_each_slave(bond, slave, iter)
4335
		__bond_release_one(bond_dev, slave->dev, true, true);
4336
	netdev_info(bond_dev, "Released all slaves\n");
4337

4338 4339 4340 4341 4342 4343
	arr = rtnl_dereference(bond->slave_arr);
	if (arr) {
		RCU_INIT_POINTER(bond->slave_arr, NULL);
		kfree_rcu(arr, rcu);
	}

J
Jay Vosburgh 已提交
4344 4345
	list_del(&bond->bond_list);

4346
	bond_debug_unregister(bond);
J
Jay Vosburgh 已提交
4347 4348
}

L
Linus Torvalds 已提交
4349 4350 4351 4352
/*------------------------- Module initialization ---------------------------*/

static int bond_check_params(struct bond_params *params)
{
4353
	int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
4354 4355
	struct bond_opt_value newval;
	const struct bond_opt_value *valptr;
4356
	int arp_all_targets_value = 0;
4357
	u16 ad_actor_sys_prio = 0;
4358
	u16 ad_user_port_key = 0;
4359
	__be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 };
4360 4361 4362 4363
	int arp_ip_count;
	int bond_mode	= BOND_MODE_ROUNDROBIN;
	int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
	int lacp_fast = 0;
4364
	int tlb_dynamic_lb;
4365

4366
	/* Convert string parameters. */
L
Linus Torvalds 已提交
4367
	if (mode) {
4368 4369 4370 4371
		bond_opt_initstr(&newval, mode);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
		if (!valptr) {
			pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
L
Linus Torvalds 已提交
4372 4373
			return -EINVAL;
		}
4374
		bond_mode = valptr->value;
L
Linus Torvalds 已提交
4375 4376
	}

4377
	if (xmit_hash_policy) {
4378 4379 4380
		if (bond_mode == BOND_MODE_ROUNDROBIN ||
		    bond_mode == BOND_MODE_ACTIVEBACKUP ||
		    bond_mode == BOND_MODE_BROADCAST) {
J
Joe Perches 已提交
4381
			pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
J
Joe Perches 已提交
4382
				bond_mode_name(bond_mode));
4383
		} else {
4384 4385 4386 4387
			bond_opt_initstr(&newval, xmit_hash_policy);
			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
						&newval);
			if (!valptr) {
J
Joe Perches 已提交
4388
				pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
4389 4390 4391
				       xmit_hash_policy);
				return -EINVAL;
			}
4392
			xmit_hashtype = valptr->value;
4393 4394 4395
		}
	}

L
Linus Torvalds 已提交
4396 4397
	if (lacp_rate) {
		if (bond_mode != BOND_MODE_8023AD) {
J
Joe Perches 已提交
4398 4399
			pr_info("lacp_rate param is irrelevant in mode %s\n",
				bond_mode_name(bond_mode));
L
Linus Torvalds 已提交
4400
		} else {
4401 4402 4403 4404
			bond_opt_initstr(&newval, lacp_rate);
			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
						&newval);
			if (!valptr) {
J
Joe Perches 已提交
4405
				pr_err("Error: Invalid lacp rate \"%s\"\n",
4406
				       lacp_rate);
L
Linus Torvalds 已提交
4407 4408
				return -EINVAL;
			}
4409
			lacp_fast = valptr->value;
L
Linus Torvalds 已提交
4410 4411 4412
		}
	}

4413
	if (ad_select) {
4414
		bond_opt_initstr(&newval, ad_select);
4415 4416 4417 4418
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
					&newval);
		if (!valptr) {
			pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
4419 4420
			return -EINVAL;
		}
4421 4422
		params->ad_select = valptr->value;
		if (bond_mode != BOND_MODE_8023AD)
4423
			pr_warn("ad_select param only affects 802.3ad mode\n");
4424 4425 4426 4427
	} else {
		params->ad_select = BOND_AD_STABLE;
	}

4428
	if (max_bonds < 0) {
4429 4430
		pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
			max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
L
Linus Torvalds 已提交
4431 4432 4433 4434
		max_bonds = BOND_DEFAULT_MAX_BONDS;
	}

	if (miimon < 0) {
4435 4436
		pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			miimon, INT_MAX);
4437
		miimon = 0;
L
Linus Torvalds 已提交
4438 4439 4440
	}

	if (updelay < 0) {
4441 4442
		pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			updelay, INT_MAX);
L
Linus Torvalds 已提交
4443 4444 4445 4446
		updelay = 0;
	}

	if (downdelay < 0) {
4447 4448
		pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			downdelay, INT_MAX);
L
Linus Torvalds 已提交
4449 4450 4451
		downdelay = 0;
	}

4452 4453
	if ((use_carrier != 0) && (use_carrier != 1)) {
		pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
4454
			use_carrier);
L
Linus Torvalds 已提交
4455 4456 4457
		use_carrier = 1;
	}

4458
	if (num_peer_notif < 0 || num_peer_notif > 255) {
4459 4460
		pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
			num_peer_notif);
4461 4462 4463
		num_peer_notif = 1;
	}

4464
	/* reset values for 802.3ad/TLB/ALB */
4465
	if (!bond_mode_uses_arp(bond_mode)) {
L
Linus Torvalds 已提交
4466
		if (!miimon) {
4467 4468
			pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
			pr_warn("Forcing miimon to 100msec\n");
4469
			miimon = BOND_DEFAULT_MIIMON;
L
Linus Torvalds 已提交
4470 4471 4472
		}
	}

4473
	if (tx_queues < 1 || tx_queues > 255) {
4474 4475
		pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
			tx_queues, BOND_DEFAULT_TX_QUEUES);
4476 4477 4478
		tx_queues = BOND_DEFAULT_TX_QUEUES;
	}

4479
	if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
4480 4481
		pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
			all_slaves_active);
4482 4483 4484
		all_slaves_active = 0;
	}

4485
	if (resend_igmp < 0 || resend_igmp > 255) {
4486 4487
		pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
			resend_igmp, BOND_DEFAULT_RESEND_IGMP);
4488 4489 4490
		resend_igmp = BOND_DEFAULT_RESEND_IGMP;
	}

4491 4492
	bond_opt_initval(&newval, packets_per_slave);
	if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
4493 4494 4495 4496 4497
		pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
			packets_per_slave, USHRT_MAX);
		packets_per_slave = 1;
	}

L
Linus Torvalds 已提交
4498
	if (bond_mode == BOND_MODE_ALB) {
J
Joe Perches 已提交
4499 4500
		pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
			  updelay);
L
Linus Torvalds 已提交
4501 4502 4503 4504 4505 4506 4507
	}

	if (!miimon) {
		if (updelay || downdelay) {
			/* just warn the user the up/down delay will have
			 * no effect since miimon is zero...
			 */
4508 4509
			pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
				updelay, downdelay);
L
Linus Torvalds 已提交
4510 4511 4512 4513
		}
	} else {
		/* don't allow arp monitoring */
		if (arp_interval) {
4514 4515
			pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
				miimon, arp_interval);
L
Linus Torvalds 已提交
4516 4517 4518 4519
			arp_interval = 0;
		}

		if ((updelay % miimon) != 0) {
4520 4521
			pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
				updelay, miimon, (updelay / miimon) * miimon);
L
Linus Torvalds 已提交
4522 4523 4524 4525 4526
		}

		updelay /= miimon;

		if ((downdelay % miimon) != 0) {
4527 4528 4529
			pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
				downdelay, miimon,
				(downdelay / miimon) * miimon);
L
Linus Torvalds 已提交
4530 4531 4532 4533 4534 4535
		}

		downdelay /= miimon;
	}

	if (arp_interval < 0) {
4536 4537
		pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			arp_interval, INT_MAX);
4538
		arp_interval = 0;
L
Linus Torvalds 已提交
4539 4540
	}

4541 4542
	for (arp_ip_count = 0, i = 0;
	     (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
4543
		__be32 ip;
4544 4545

		/* not a complete check, but good enough to catch mistakes */
4546
		if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
4547
		    !bond_is_ip_target_ok(ip)) {
4548 4549
			pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
				arp_ip_target[i]);
L
Linus Torvalds 已提交
4550 4551
			arp_interval = 0;
		} else {
4552 4553 4554
			if (bond_get_targets_ip(arp_target, ip) == -1)
				arp_target[arp_ip_count++] = ip;
			else
4555 4556
				pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
					&ip);
L
Linus Torvalds 已提交
4557 4558 4559 4560 4561
		}
	}

	if (arp_interval && !arp_ip_count) {
		/* don't allow arping if no arp_ip_target given... */
4562 4563
		pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
			arp_interval);
L
Linus Torvalds 已提交
4564 4565 4566
		arp_interval = 0;
	}

4567 4568
	if (arp_validate) {
		if (!arp_interval) {
J
Joe Perches 已提交
4569
			pr_err("arp_validate requires arp_interval\n");
4570 4571 4572
			return -EINVAL;
		}

4573 4574 4575 4576
		bond_opt_initstr(&newval, arp_validate);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
					&newval);
		if (!valptr) {
J
Joe Perches 已提交
4577
			pr_err("Error: invalid arp_validate \"%s\"\n",
4578
			       arp_validate);
4579 4580
			return -EINVAL;
		}
4581 4582
		arp_validate_value = valptr->value;
	} else {
4583
		arp_validate_value = 0;
4584
	}
4585

4586
	if (arp_all_targets) {
4587 4588 4589 4590
		bond_opt_initstr(&newval, arp_all_targets);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
					&newval);
		if (!valptr) {
4591 4592 4593
			pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
			       arp_all_targets);
			arp_all_targets_value = 0;
4594 4595
		} else {
			arp_all_targets_value = valptr->value;
4596 4597 4598
		}
	}

L
Linus Torvalds 已提交
4599
	if (miimon) {
J
Joe Perches 已提交
4600
		pr_info("MII link monitoring set to %d ms\n", miimon);
L
Linus Torvalds 已提交
4601
	} else if (arp_interval) {
4602 4603
		valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
					  arp_validate_value);
J
Joe Perches 已提交
4604
		pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
4605
			arp_interval, valptr->string, arp_ip_count);
L
Linus Torvalds 已提交
4606 4607

		for (i = 0; i < arp_ip_count; i++)
J
Joe Perches 已提交
4608
			pr_cont(" %s", arp_ip_target[i]);
L
Linus Torvalds 已提交
4609

J
Joe Perches 已提交
4610
		pr_cont("\n");
L
Linus Torvalds 已提交
4611

4612
	} else if (max_bonds) {
L
Linus Torvalds 已提交
4613 4614 4615
		/* miimon and arp_interval not set, we need one so things
		 * work as expected, see bonding.txt for details
		 */
J
Joe Perches 已提交
4616
		pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
L
Linus Torvalds 已提交
4617 4618
	}

4619
	if (primary && !bond_mode_uses_primary(bond_mode)) {
L
Linus Torvalds 已提交
4620 4621 4622
		/* currently, using a primary only makes sense
		 * in active backup, TLB or ALB modes
		 */
4623 4624
		pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
			primary, bond_mode_name(bond_mode));
L
Linus Torvalds 已提交
4625 4626 4627
		primary = NULL;
	}

4628
	if (primary && primary_reselect) {
4629 4630 4631 4632
		bond_opt_initstr(&newval, primary_reselect);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
					&newval);
		if (!valptr) {
J
Joe Perches 已提交
4633
			pr_err("Error: Invalid primary_reselect \"%s\"\n",
4634
			       primary_reselect);
4635 4636
			return -EINVAL;
		}
4637
		primary_reselect_value = valptr->value;
4638 4639 4640 4641
	} else {
		primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
	}

4642
	if (fail_over_mac) {
4643 4644 4645 4646
		bond_opt_initstr(&newval, fail_over_mac);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
					&newval);
		if (!valptr) {
J
Joe Perches 已提交
4647
			pr_err("Error: invalid fail_over_mac \"%s\"\n",
4648
			       fail_over_mac);
4649 4650
			return -EINVAL;
		}
4651
		fail_over_mac_value = valptr->value;
4652
		if (bond_mode != BOND_MODE_ACTIVEBACKUP)
4653
			pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
4654 4655 4656
	} else {
		fail_over_mac_value = BOND_FOM_NONE;
	}
4657

4658 4659 4660 4661 4662 4663 4664 4665 4666 4667
	bond_opt_initstr(&newval, "default");
	valptr = bond_opt_parse(
			bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
				     &newval);
	if (!valptr) {
		pr_err("Error: No ad_actor_sys_prio default value");
		return -EINVAL;
	}
	ad_actor_sys_prio = valptr->value;

4668 4669 4670 4671 4672 4673 4674 4675
	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
				&newval);
	if (!valptr) {
		pr_err("Error: No ad_user_port_key default value");
		return -EINVAL;
	}
	ad_user_port_key = valptr->value;

4676 4677 4678 4679 4680
	bond_opt_initstr(&newval, "default");
	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval);
	if (!valptr) {
		pr_err("Error: No tlb_dynamic_lb default value");
		return -EINVAL;
4681
	}
4682
	tlb_dynamic_lb = valptr->value;
4683

4684
	if (lp_interval == 0) {
4685 4686
		pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
			INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
4687 4688 4689
		lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
	}

L
Linus Torvalds 已提交
4690 4691
	/* fill params struct with the proper values */
	params->mode = bond_mode;
4692
	params->xmit_policy = xmit_hashtype;
L
Linus Torvalds 已提交
4693
	params->miimon = miimon;
4694
	params->num_peer_notif = num_peer_notif;
L
Linus Torvalds 已提交
4695
	params->arp_interval = arp_interval;
4696
	params->arp_validate = arp_validate_value;
4697
	params->arp_all_targets = arp_all_targets_value;
L
Linus Torvalds 已提交
4698 4699 4700 4701 4702
	params->updelay = updelay;
	params->downdelay = downdelay;
	params->use_carrier = use_carrier;
	params->lacp_fast = lacp_fast;
	params->primary[0] = 0;
4703
	params->primary_reselect = primary_reselect_value;
4704
	params->fail_over_mac = fail_over_mac_value;
4705
	params->tx_queues = tx_queues;
4706
	params->all_slaves_active = all_slaves_active;
4707
	params->resend_igmp = resend_igmp;
4708
	params->min_links = min_links;
4709
	params->lp_interval = lp_interval;
4710
	params->packets_per_slave = packets_per_slave;
4711
	params->tlb_dynamic_lb = tlb_dynamic_lb;
4712
	params->ad_actor_sys_prio = ad_actor_sys_prio;
4713
	eth_zero_addr(params->ad_actor_system);
4714
	params->ad_user_port_key = ad_user_port_key;
4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725
	if (packets_per_slave > 0) {
		params->reciprocal_packets_per_slave =
			reciprocal_value(packets_per_slave);
	} else {
		/* reciprocal_packets_per_slave is unused if
		 * packets_per_slave is 0 or 1, just initialize it
		 */
		params->reciprocal_packets_per_slave =
			(struct reciprocal_value) { 0 };
	}

L
Linus Torvalds 已提交
4726 4727 4728 4729 4730 4731 4732 4733 4734 4735
	if (primary) {
		strncpy(params->primary, primary, IFNAMSIZ);
		params->primary[IFNAMSIZ - 1] = 0;
	}

	memcpy(params->arp_targets, arp_target, sizeof(arp_target));

	return 0;
}
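/* Usage example (added note): the values validated above come straight from
 * module parameters, e.g.
 *
 *	modprobe bonding mode=802.3ad miimon=100 lacp_rate=fast max_bonds=1
 *
 * Unparseable strings (bad mode, lacp_rate, arp_validate, ...) make the
 * module load fail with -EINVAL, while out-of-range integers are merely
 * clamped back to their defaults with the warnings printed above.
 */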

/* Called from registration process */
4737 4738 4739
static int bond_init(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
4740
	struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
4741

4742
	netdev_dbg(bond_dev, "Begin bond_init\n");
4743

4744
	bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
4745 4746 4747
	if (!bond->wq)
		return -ENOMEM;

4748
	bond->nest_level = SINGLE_DEPTH_NESTING;
4749
	netdev_lockdep_set_classes(bond_dev);
4750

4751
	list_add_tail(&bond->bond_list, &bn->dev_list);
4752

4753
	bond_prepare_sysfs_group(bond);
4754

4755 4756
	bond_debug_register(bond);

4757 4758
	/* Ensure valid dev_addr */
	if (is_zero_ether_addr(bond_dev->dev_addr) &&
4759
	    bond_dev->addr_assign_type == NET_ADDR_PERM)
4760 4761
		eth_hw_addr_random(bond_dev);

4762 4763 4764
	return 0;
}
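
/* Illustrative sketch (not verbatim driver code): bond->wq, allocated in
 * bond_init() above, is the ordered workqueue the periodic link monitors run
 * on.  A consumer looks roughly like the following, assuming the delayed_work
 * members that struct bonding declares elsewhere in this driver:
 *
 *	if (bond->params.miimon)
 *		queue_delayed_work(bond->wq, &bond->mii_work, 0);
 *
 * WQ_MEM_RECLAIM is requested because the bond may carry traffic that is
 * needed to make forward progress under memory pressure.
 */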

unsigned int bond_get_num_tx_queues(void)
{
	return tx_queues;
}

/* Create a new bond based on the specified name and bonding parameters.
 * If name is NULL, obtain a suitable "bond%d" name for us.
 * Caller must NOT hold rtnl_lock; we need to release it here before we
 * set up our sysfs entries.
 */
int bond_create(struct net *net, const char *name)
{
	struct net_device *bond_dev;
	struct bonding *bond;
	struct alb_bond_info *bond_info;
	int res;

	rtnl_lock();

	bond_dev = alloc_netdev_mq(sizeof(struct bonding),
				   name ? name : "bond%d", NET_NAME_UNKNOWN,
				   bond_setup, tx_queues);
	if (!bond_dev) {
		pr_err("%s: eek! can't alloc netdev!\n", name);
		rtnl_unlock();
		return -ENOMEM;
	}

	/*
	 * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX.
	 * It is set to 0 by default which is wrong.
	 */
	bond = netdev_priv(bond_dev);
	bond_info = &(BOND_ALB_INFO(bond));
	bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;

	dev_net_set(bond_dev, net);
	bond_dev->rtnl_link_ops = &bond_link_ops;

	res = register_netdevice(bond_dev);

	netif_carrier_off(bond_dev);

	bond_work_init_all(bond);

	rtnl_unlock();
	if (res < 0)
		free_netdev(bond_dev);
	return res;
}
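
/* Hypothetical usage sketch: bond_create() takes rtnl_lock() itself, so a
 * caller (for example a netns-aware configuration path holding a struct net)
 * invokes it without rtnl held.  The device name below is an assumption for
 * illustration only:
 *
 *	int err = bond_create(net, "bond7");
 *	if (err)
 *		pr_err("bond7 creation failed: %d\n", err);
 *
 * Passing NULL as the name lets the core pick the next free "bond%d", which
 * is what the module-load loop in bonding_init() below relies on.
 */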

static int __net_init bond_net_init(struct net *net)
{
	struct bond_net *bn = net_generic(net, bond_net_id);

	bn->net = net;
	INIT_LIST_HEAD(&bn->dev_list);

	bond_create_proc_dir(bn);
	bond_create_sysfs(bn);

	return 0;
}

static void __net_exit bond_net_exit(struct net *net)
{
	struct bond_net *bn = net_generic(net, bond_net_id);
	struct bonding *bond, *tmp_bond;
	LIST_HEAD(list);

	bond_destroy_sysfs(bn);

	/* Kill off any bonds created after unregistering bond rtnl ops */
	rtnl_lock();
	list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
		unregister_netdevice_queue(bond->dev, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();

	bond_destroy_proc_dir(bn);
}

static struct pernet_operations bond_net_ops = {
	.init = bond_net_init,
	.exit = bond_net_exit,
	.id   = &bond_net_id,
	.size = sizeof(struct bond_net),
};
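
/* Explanatory note (illustrative, not verbatim driver code): because .id and
 * .size are filled in above, register_pernet_subsys() has the core allocate a
 * zeroed struct bond_net for every network namespace and index it by
 * bond_net_id, so any code holding a struct net can recover the per-namespace
 * state the same way bond_net_init()/bond_net_exit() do:
 *
 *	struct bond_net *bn = net_generic(net, bond_net_id);
 *
 * bond_net_init() therefore only initializes fields; it never allocates bn.
 */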

static int __init bonding_init(void)
{
	int i;
	int res;

	pr_info("%s", bond_version);

	res = bond_check_params(&bonding_defaults);
	if (res)
		goto out;

	res = register_pernet_subsys(&bond_net_ops);
	if (res)
		goto out;

	res = bond_netlink_init();
	if (res)
		goto err_link;

	bond_create_debugfs();

	for (i = 0; i < max_bonds; i++) {
		res = bond_create(&init_net, NULL);
		if (res)
			goto err;
	}

	register_netdevice_notifier(&bond_netdev_notifier);
out:
	return res;
err:
	bond_destroy_debugfs();
	bond_netlink_fini();
err_link:
	unregister_pernet_subsys(&bond_net_ops);
	goto out;
}

static void __exit bonding_exit(void)
{
	unregister_netdevice_notifier(&bond_netdev_notifier);

	bond_destroy_debugfs();

	bond_netlink_fini();
	unregister_pernet_subsys(&bond_net_ops);

#ifdef CONFIG_NET_POLL_CONTROLLER
	/* Make sure we don't have an imbalance on our netpoll blocking */
	WARN_ON(atomic_read(&netpoll_block_tx));
#endif
}

module_init(bonding_init);
module_exit(bonding_exit);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");