/*
 * originally based on the dummy device.
 *
 * Copyright 1999, Thomas Davis, tadavis@lbl.gov.
 * Licensed under the GPL. Based on dummy.c, and eql.c devices.
 *
 * bonding.c: an Ethernet Bonding driver
 *
 * This is useful to talk to Cisco EtherChannel-compatible equipment:
 *	Cisco 5500
 *	Sun Trunking (Solaris)
 *	Alteon AceDirector Trunks
 *	Linux Bonding
 *	and probably many L2 switches ...
 *
 * How it works:
 *    ifconfig bond0 ipaddress netmask up
 *      will setup a network device, with an ip address.  No mac address
 *	will be assigned at this time.  The hw mac address will come from
 *	the first slave bonded to the channel.  All slaves will then use
 *	this hw mac address.
 *
 *    ifconfig bond0 down
 *         will release all slaves, marking them as down.
 *
 *    ifenslave bond0 eth0
 *	will attach eth0 to bond0 as a slave.  eth0's hw mac address will either
 *	a: be used as the bond's initial mac address, or
 *	b: if bond0 already has a hw mac address, be overwritten by
 *	   bond0's hw mac address.
 *
 */
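/* A rough modern equivalent of the ifconfig/ifenslave sequence above, using
 * iproute2 (illustrative commands and interface names only):
 *
 *	ip link add bond0 type bond mode active-backup miimon 100
 *	ip link set eth0 master bond0
 *	ip link set bond0 up
 */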

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <net/ip.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/socket.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/smp.h>
#include <linux/if_ether.h>
#include <net/arp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_bonding.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>
#include <net/route.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_dissector.h>
#include <net/switchdev.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/bond_alb.h>

#include "bonding_priv.h"

/*---------------------------- Module parameters ----------------------------*/

/* monitor all links that often (in milliseconds). <=0 disables monitoring */

static int max_bonds	= BOND_DEFAULT_MAX_BONDS;
static int tx_queues	= BOND_DEFAULT_TX_QUEUES;
static int num_peer_notif = 1;
static int miimon;
static int updelay;
static int downdelay;
static int use_carrier	= 1;
static char *mode;
static char *primary;
static char *primary_reselect;
static char *lacp_rate;
static int min_links;
static char *ad_select;
static char *xmit_hash_policy;
static int arp_interval;
static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
static char *arp_validate;
static char *arp_all_targets;
static char *fail_over_mac;
static int all_slaves_active;
static struct bond_params bonding_defaults;
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
static int packets_per_slave = 1;
static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;

module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
module_param(tx_queues, int, 0);
MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
module_param_named(num_grat_arp, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
			       "failover event (alias of num_unsol_na)");
module_param_named(num_unsol_na, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
			       "failover event (alias of num_grat_arp)");
module_param(miimon, int, 0);
MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
module_param(updelay, int, 0);
MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
module_param(downdelay, int, 0);
MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
			    "in milliseconds");
module_param(use_carrier, int, 0);
MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
			      "0 for off, 1 for on (default)");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
		       "1 for active-backup, 2 for balance-xor, "
		       "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
		       "6 for balance-alb");
module_param(primary, charp, 0);
MODULE_PARM_DESC(primary, "Primary network device to use");
module_param(primary_reselect, charp, 0);
MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
				   "once it comes up; "
				   "0 for always (default), "
				   "1 for only if speed of primary is "
				   "better, "
				   "2 for only on active slave "
				   "failure");
module_param(lacp_rate, charp, 0);
MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
			    "0 for slow, 1 for fast");
module_param(ad_select, charp, 0);
MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
			    "0 for stable (default), 1 for bandwidth, "
			    "2 for count");
module_param(min_links, int, 0);
MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");

module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; "
				   "0 for layer 2 (default), 1 for layer 3+4, "
				   "2 for layer 2+3, 3 for encap layer 2+3, "
				   "4 for encap layer 3+4");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
module_param(arp_validate, charp, 0);
MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
			       "0 for none (default), 1 for active, "
			       "2 for backup, 3 for all");
module_param(arp_all_targets, charp, 0);
MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
module_param(fail_over_mac, charp, 0);
MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
				"the same MAC; 0 for none (default), "
				"1 for active, 2 for follow");
module_param(all_slaves_active, int, 0);
MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
				     "by setting active flag for all slaves; "
				     "0 for never (default), 1 for always.");
module_param(resend_igmp, int, 0);
MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
			      "link failure");
module_param(packets_per_slave, int, 0);
MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
				    "mode; 0 for a random slave, 1 packet per "
				    "slave (default), >1 packets per slave.");
module_param(lp_interval, uint, 0);
MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
			      "the bonding driver sends learning packets to "
			      "each slave's peer switch. The default is 1.");

/*----------------------------- Global variables ----------------------------*/

#ifdef CONFIG_NET_POLL_CONTROLLER
atomic_t netpoll_block_tx = ATOMIC_INIT(0);
#endif

int bond_net_id __read_mostly;

static __be32 arp_target[BOND_MAX_ARP_TARGETS];
static int arp_ip_count;
static int bond_mode	= BOND_MODE_ROUNDROBIN;
static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
static int lacp_fast;

/*-------------------------- Forward declarations ---------------------------*/

static int bond_init(struct net_device *bond_dev);
static void bond_uninit(struct net_device *bond_dev);
static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
						struct rtnl_link_stats64 *stats);
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
				  int mod);

/*---------------------------- General routines -----------------------------*/

const char *bond_mode_name(int mode)
{
	static const char *names[] = {
		[BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
		[BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
		[BOND_MODE_XOR] = "load balancing (xor)",
		[BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
		[BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
		[BOND_MODE_TLB] = "transmit load balancing",
		[BOND_MODE_ALB] = "adaptive load balancing",
	};

	if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
		return "unknown";

	return names[mode];
}

/*---------------------------------- VLAN -----------------------------------*/

/**
 * bond_dev_queue_xmit - Prepare skb for xmit.
 *
 * @bond: bond device that got this skb for tx.
 * @skb: hw accel VLAN tagged skb to transmit
 * @slave_dev: slave that is supposed to xmit this skbuff
 */
void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
			struct net_device *slave_dev)
{
	skb->dev = slave_dev;

	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;

	if (unlikely(netpoll_tx_running(bond->dev)))
		bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
	else
		dev_queue_xmit(skb);
}

/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
 * we don't protect the slave list iteration with a lock because:
 * a. This operation is performed in IOCTL context,
 * b. The operation is protected by the RTNL semaphore in the 8021q code,
 * c. Holding a lock with BH disabled while directly calling a base driver
 *    entry point is generally a BAD idea.
 *
 * The design of synchronization/protection for this operation in the 8021q
 * module is good for one or more VLAN devices over a single physical device
 * and cannot be extended for a teaming solution like bonding, so there is a
 * potential race condition here where a net device from the vlan group might
 * be referenced (either by a base driver or the 8021q code) while it is being
 * removed from the system. However, it turns out we're not making matters
 * worse, and if it works for regular VLAN usage it will work here too.
*/

/**
 * bond_vlan_rx_add_vid - Propagates adding an id to slaves
 * @bond_dev: bonding net device that got called
 * @vid: vlan id being added
 */
static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
				__be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *rollback_slave;
	struct list_head *iter;
	int res;

	bond_for_each_slave(bond, slave, iter) {
		res = vlan_vid_add(slave->dev, proto, vid);
		if (res)
			goto unwind;
	}

	return 0;

unwind:
	/* unwind to the slave that failed */
	bond_for_each_slave(bond, rollback_slave, iter) {
		if (rollback_slave == slave)
			break;

		vlan_vid_del(rollback_slave->dev, proto, vid);
	}

	return res;
}

/**
 * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
 * @bond_dev: bonding net device that got called
 * @vid: vlan id being removed
 */
static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
				 __be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		vlan_vid_del(slave->dev, proto, vid);

	if (bond_is_lb(bond))
		bond_alb_clear_vlan(bond, vid);

	return 0;
}

/*------------------------------- Link status -------------------------------*/

/* Set the carrier state for the master according to the state of its
 * slaves.  If any slaves are up, the master is up.  In 802.3ad mode,
 * do special 802.3ad magic.
 *
 * Returns zero if carrier state does not change, nonzero if it does.
 */
int bond_set_carrier(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave;

	if (!bond_has_slaves(bond))
		goto down;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		return bond_3ad_set_carrier(bond);

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP) {
			if (!netif_carrier_ok(bond->dev)) {
				netif_carrier_on(bond->dev);
				return 1;
			}
			return 0;
		}
	}

down:
	if (netif_carrier_ok(bond->dev)) {
		netif_carrier_off(bond->dev);
		return 1;
	}
	return 0;
}

/* Get link speed and duplex from the slave's base driver
 * using ethtool. If for some reason the call fails or the
 * values are invalid, set speed and duplex to SPEED_UNKNOWN and
 * DUPLEX_UNKNOWN, and return.
 */
static void bond_update_speed_duplex(struct slave *slave)
{
	struct net_device *slave_dev = slave->dev;
	struct ethtool_cmd ecmd;
	u32 slave_speed;
	int res;

	slave->speed = SPEED_UNKNOWN;
	slave->duplex = DUPLEX_UNKNOWN;

	res = __ethtool_get_settings(slave_dev, &ecmd);
	if (res < 0)
		return;

	slave_speed = ethtool_cmd_speed(&ecmd);
	if (slave_speed == 0 || slave_speed == ((__u32) -1))
		return;

	switch (ecmd.duplex) {
	case DUPLEX_FULL:
	case DUPLEX_HALF:
		break;
	default:
		return;
	}

	slave->speed = slave_speed;
	slave->duplex = ecmd.duplex;

	return;
}

const char *bond_slave_link_status(s8 link)
{
	switch (link) {
	case BOND_LINK_UP:
		return "up";
	case BOND_LINK_FAIL:
		return "going down";
	case BOND_LINK_DOWN:
		return "down";
	case BOND_LINK_BACK:
		return "going back";
	default:
		return "unknown";
	}
}

/* if <dev> supports MII link status reporting, check its link status.
 *
 * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
 * depending upon the setting of the use_carrier parameter.
 *
 * Return either BMSR_LSTATUS, meaning that the link is up (or we
 * can't tell and just pretend it is), or 0, meaning that the link is
 * down.
 *
 * If reporting is non-zero, instead of faking link up, return -1 if
 * both ETHTOOL and MII ioctls fail (meaning the device does not
 * support them).  If use_carrier is set, return whatever it says.
 * It'd be nice if there was a good way to tell if a driver supports
 * netif_carrier, but there really isn't.
 */
static int bond_check_dev_link(struct bonding *bond,
			       struct net_device *slave_dev, int reporting)
{
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	int (*ioctl)(struct net_device *, struct ifreq *, int);
	struct ifreq ifr;
	struct mii_ioctl_data *mii;

	if (!reporting && !netif_running(slave_dev))
		return 0;

	if (bond->params.use_carrier)
		return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;

	/* Try to get link status using Ethtool first. */
	if (slave_dev->ethtool_ops->get_link)
		return slave_dev->ethtool_ops->get_link(slave_dev) ?
			BMSR_LSTATUS : 0;

	/* Ethtool can't be used, fallback to MII ioctls. */
	ioctl = slave_ops->ndo_do_ioctl;
	if (ioctl) {
		/* TODO: set pointer to correct ioctl on a per team member
		 *       bases to make this more efficient. that is, once
		 *       we determine the correct ioctl, we will always
		 *       call it and not the others for that team
		 *       member.
		 */

		/* We cannot assume that SIOCGMIIPHY will also read a
		 * register; not all network drivers (e.g., e100)
		 * support that.
		 */

		/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
		strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
		mii = if_mii(&ifr);
		if (IOCTL(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
			mii->reg_num = MII_BMSR;
			if (IOCTL(slave_dev, &ifr, SIOCGMIIREG) == 0)
				return mii->val_out & BMSR_LSTATUS;
		}
	}

	/* If reporting, report that either there's no dev->do_ioctl,
	 * or both SIOCGMIIREG and get_link failed (meaning that we
	 * cannot report link status).  If not reporting, pretend
	 * we're ok.
	 */
	return reporting ? -1 : BMSR_LSTATUS;
}

/*----------------------------- Multicast list ------------------------------*/

/* Push the promiscuity flag down to appropriate slaves */
static int bond_set_promiscuity(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_promiscuity(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_promiscuity(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}

/* Push the allmulti flag down to all slaves */
static int bond_set_allmulti(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_allmulti(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_allmulti(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}

/* Retrieve the list of registered multicast addresses for the bonding
 * device and retransmit an IGMP JOIN request to the current active
 * slave.
 */
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mcast_work.work);

	if (!rtnl_trylock()) {
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);

	if (bond->igmp_retrans > 1) {
		bond->igmp_retrans--;
		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
	}
	rtnl_unlock();
}

/* Flush bond's hardware addresses from slave */
static void bond_hw_addr_flush(struct net_device *bond_dev,
			       struct net_device *slave_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	dev_uc_unsync(slave_dev, bond_dev);
	dev_mc_unsync(slave_dev, bond_dev);

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		/* del lacpdu mc addr from mc list */
		u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;

		dev_mc_del(slave_dev, lacpdu_multicast);
	}
}

/*--------------------------- Active slave change ---------------------------*/

/* Update the hardware address list and promisc/allmulti for the new and
 * old active slaves (if any).  Modes that are not using primary keep all
 * slaves up-to-date at all times; only the modes that use primary need to call
 * this function to swap these settings during a failover.
 */
static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
			      struct slave *old_active)
{
	if (old_active) {
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(old_active->dev, -1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(old_active->dev, -1);

		bond_hw_addr_flush(bond->dev, old_active->dev);
	}

	if (new_active) {
		/* FIXME: Signal errors upstream. */
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(new_active->dev, 1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(new_active->dev, 1);

		netif_addr_lock_bh(bond->dev);
		dev_uc_sync(new_active->dev, bond->dev);
		dev_mc_sync(new_active->dev, bond->dev);
		netif_addr_unlock_bh(bond->dev);
	}
}

/**
 * bond_set_dev_addr - clone slave's address to bond
 * @bond_dev: bond net device
 * @slave_dev: slave net device
 *
 * Should be called with RTNL held.
 */
static void bond_set_dev_addr(struct net_device *bond_dev,
			      struct net_device *slave_dev)
{
	netdev_dbg(bond_dev, "bond_dev=%p slave_dev=%p slave_dev->name=%s slave_dev->addr_len=%d\n",
		   bond_dev, slave_dev, slave_dev->name, slave_dev->addr_len);
	memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
	bond_dev->addr_assign_type = NET_ADDR_STOLEN;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
}

static struct slave *bond_get_old_active(struct bonding *bond,
					 struct slave *new_active)
{
	struct slave *slave;
	struct list_head *iter;

	bond_for_each_slave(bond, slave, iter) {
		if (slave == new_active)
			continue;

		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
			return slave;
	}

	return NULL;
}

/* bond_do_fail_over_mac
 *
 * Perform special MAC address swapping for fail_over_mac settings
 *
 * Called with RTNL
 */
static void bond_do_fail_over_mac(struct bonding *bond,
				  struct slave *new_active,
				  struct slave *old_active)
{
	u8 tmp_mac[ETH_ALEN];
	struct sockaddr saddr;
	int rv;

	switch (bond->params.fail_over_mac) {
	case BOND_FOM_ACTIVE:
		if (new_active)
			bond_set_dev_addr(bond->dev, new_active->dev);
		break;
	case BOND_FOM_FOLLOW:
		/* if new_active && old_active, swap them
		 * if just old_active, do nothing (going to no active slave)
		 * if just new_active, set new_active to bond's MAC
		 */
		if (!new_active)
			return;

		if (!old_active)
			old_active = bond_get_old_active(bond, new_active);

		if (old_active) {
			ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
			ether_addr_copy(saddr.sa_data,
					old_active->dev->dev_addr);
			saddr.sa_family = new_active->dev->type;
		} else {
			ether_addr_copy(saddr.sa_data, bond->dev->dev_addr);
			saddr.sa_family = bond->dev->type;
		}

		rv = dev_set_mac_address(new_active->dev, &saddr);
		if (rv) {
			netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
				   -rv, new_active->dev->name);
			goto out;
		}

		if (!old_active)
			goto out;

		ether_addr_copy(saddr.sa_data, tmp_mac);
		saddr.sa_family = old_active->dev->type;

		rv = dev_set_mac_address(old_active->dev, &saddr);
		if (rv)
			netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
				   -rv, new_active->dev->name);
out:
		break;
	default:
		netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
			   bond->params.fail_over_mac);
		break;
	}

}

static struct slave *bond_choose_primary_or_current(struct bonding *bond)
{
	struct slave *prim = rtnl_dereference(bond->primary_slave);
	struct slave *curr = rtnl_dereference(bond->curr_active_slave);

	if (!prim || prim->link != BOND_LINK_UP) {
		if (!curr || curr->link != BOND_LINK_UP)
			return NULL;
		return curr;
	}

	if (bond->force_primary) {
		bond->force_primary = false;
		return prim;
	}

	if (!curr || curr->link != BOND_LINK_UP)
		return prim;

	/* At this point, prim and curr are both up */
	switch (bond->params.primary_reselect) {
	case BOND_PRI_RESELECT_ALWAYS:
		return prim;
	case BOND_PRI_RESELECT_BETTER:
		if (prim->speed < curr->speed)
			return curr;
		if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
			return curr;
		return prim;
	case BOND_PRI_RESELECT_FAILURE:
		return curr;
	default:
		netdev_err(bond->dev, "impossible primary_reselect %d\n",
			   bond->params.primary_reselect);
		return curr;
749 750
	}
}

/**
 * bond_find_best_slave - select the best available slave to be the active one
 * @bond: our bonding struct
 */
static struct slave *bond_find_best_slave(struct bonding *bond)
{
	struct slave *slave, *bestslave = NULL;
	struct list_head *iter;
	int mintime = bond->params.updelay;

	slave = bond_choose_primary_or_current(bond);
	if (slave)
		return slave;

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP)
			return slave;
		if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
		    slave->delay < mintime) {
			mintime = slave->delay;
			bestslave = slave;
		}
	}

	return bestslave;
}

static bool bond_should_notify_peers(struct bonding *bond)
{
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	rcu_read_unlock();

	netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
		   slave ? slave->dev->name : "NULL");

	if (!slave || !bond->send_peer_notif ||
	    !netif_carrier_ok(bond->dev) ||
	    test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
		return false;

	return true;
}

/**
 * change_active_interface - change the active slave into the specified one
 * @bond: our bonding struct
 * @new: the new slave to make the active one
 *
 * Set the new slave to the bond's settings and unset them on the old
 * curr_active_slave.
 * Setting include flags, mc-list, promiscuity, allmulti, etc.
 *
 * If @new's link state is %BOND_LINK_BACK we'll set it to %BOND_LINK_UP,
 * because it is apparently the best available slave we have, even though its
 * updelay hasn't timed out yet.
 *
 * Caller must hold RTNL.
 */
void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
{
	struct slave *old_active;

	ASSERT_RTNL();

	old_active = rtnl_dereference(bond->curr_active_slave);

	if (old_active == new_active)
		return;

	if (new_active) {
		new_active->last_link_up = jiffies;

		if (new_active->link == BOND_LINK_BACK) {
			if (bond_uses_primary(bond)) {
				netdev_info(bond->dev, "making interface %s the new active one %d ms earlier\n",
					    new_active->dev->name,
					    (bond->params.updelay - new_active->delay) * bond->params.miimon);
			}

			new_active->delay = 0;
			bond_set_slave_link_state(new_active, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);

			if (BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_3ad_handle_link_change(new_active, BOND_LINK_UP);

			if (bond_is_lb(bond))
				bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
		} else {
			if (bond_uses_primary(bond)) {
				netdev_info(bond->dev, "making interface %s the new active one\n",
					    new_active->dev->name);
			}
		}
	}

	if (bond_uses_primary(bond))
		bond_hw_addr_swap(bond, new_active, old_active);

	if (bond_is_lb(bond)) {
		bond_alb_handle_active_change(bond, new_active);
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);
		if (new_active)
			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);
	} else {
		rcu_assign_pointer(bond->curr_active_slave, new_active);
	}

	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);

		if (new_active) {
			bool should_notify_peers = false;

			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);

			if (bond->params.fail_over_mac)
				bond_do_fail_over_mac(bond, new_active,
						      old_active);

			if (netif_running(bond->dev)) {
				bond->send_peer_notif =
					bond->params.num_peer_notif;
				should_notify_peers =
					bond_should_notify_peers(bond);
			}

			call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
			if (should_notify_peers)
				call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
							 bond->dev);
		}
	}

	/* resend IGMP joins since active slave has changed or
	 * all were sent on curr_active_slave.
	 * resend only if bond is brought up with the affected
	 * bonding modes and the retransmission is enabled
	 */
	if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
	    ((bond_uses_primary(bond) && new_active) ||
	     BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
		bond->igmp_retrans = bond->params.resend_igmp;
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
	}
}

/**
 * bond_select_active_slave - select a new active slave, if needed
 * @bond: our bonding struct
 *
 * This function should be called when one of the following occurs:
 * - The old curr_active_slave has been released or lost its link.
 * - The primary_slave has got its link back.
 * - A slave has got its link back and there's no old curr_active_slave.
 *
 * Caller must hold RTNL.
 */
void bond_select_active_slave(struct bonding *bond)
{
	struct slave *best_slave;
	int rv;

	ASSERT_RTNL();

	best_slave = bond_find_best_slave(bond);
	if (best_slave != rtnl_dereference(bond->curr_active_slave)) {
		bond_change_active_slave(bond, best_slave);
		rv = bond_set_carrier(bond);
		if (!rv)
			return;

		if (netif_carrier_ok(bond->dev))
			netdev_info(bond->dev, "first active interface up!\n");
		else
			netdev_info(bond->dev, "now running without any active interface!\n");
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static inline int slave_enable_netpoll(struct slave *slave)
{
	struct netpoll *np;
	int err = 0;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	err = -ENOMEM;
	if (!np)
		goto out;

	err = __netpoll_setup(np, slave->dev);
	if (err) {
		kfree(np);
		goto out;
	}
	slave->np = np;
out:
	return err;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
	struct netpoll *np = slave->np;

	if (!np)
		return;

	slave->np = NULL;
	__netpoll_free_async(np);
}

static void bond_poll_controller(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave = NULL;
	struct list_head *iter;
	struct ad_info ad_info;
	struct netpoll_info *ni;
	const struct net_device_ops *ops;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		if (bond_3ad_get_active_agg_info(bond, &ad_info))
			return;

	bond_for_each_slave_rcu(bond, slave, iter) {
		ops = slave->dev->netdev_ops;
		if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller)
			continue;

		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			struct aggregator *agg =
			    SLAVE_AD_INFO(slave)->port.aggregator;

			if (agg &&
			    agg->aggregator_identifier != ad_info.aggregator_id)
				continue;
		}

		ni = rcu_dereference_bh(slave->dev->npinfo);
		if (down_trylock(&ni->dev_lock))
			continue;
		ops->ndo_poll_controller(slave->dev);
		up(&ni->dev_lock);
	}
}

static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		if (bond_slave_is_up(slave))
			slave_disable_netpoll(slave);
}

static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	struct slave *slave;
	int err = 0;

	bond_for_each_slave(bond, slave, iter) {
		err = slave_enable_netpoll(slave);
		if (err) {
			bond_netpoll_cleanup(dev);
			break;
		}
	}
	return err;
}
#else
static inline int slave_enable_netpoll(struct slave *slave)
{
	return 0;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
}
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
}
#endif

/*---------------------------------- IOCTL ----------------------------------*/

static netdev_features_t bond_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	netdev_features_t mask;
	struct slave *slave;

	mask = features;

	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		features = netdev_increment_features(features,
						     slave->dev->features,
						     mask);
	}
	features = netdev_add_tso_features(features, mask);

	return features;
}

#define BOND_VLAN_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
				 NETIF_F_HIGHDMA | NETIF_F_LRO)

#define BOND_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)

static void bond_compute_features(struct bonding *bond)
{
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;
	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
	netdev_features_t enc_features  = BOND_ENC_FEATURES;
	struct net_device *bond_dev = bond->dev;
	struct list_head *iter;
	struct slave *slave;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int gso_max_size = GSO_MAX_SIZE;
	u16 gso_max_segs = GSO_MAX_SEGS;

	if (!bond_has_slaves(bond))
		goto done;
	vlan_features &= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		vlan_features = netdev_increment_features(vlan_features,
			slave->dev->vlan_features, BOND_VLAN_FEATURES);

		enc_features = netdev_increment_features(enc_features,
							 slave->dev->hw_enc_features,
							 BOND_ENC_FEATURES);
		dst_release_flag &= slave->dev->priv_flags;
		if (slave->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = slave->dev->hard_header_len;

		gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
		gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
	}

done:
	bond_dev->vlan_features = vlan_features;
	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL;
	bond_dev->hard_header_len = max_hard_header_len;
	bond_dev->gso_max_segs = gso_max_segs;
	netif_set_gso_max_size(bond_dev, gso_max_size);

	bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
	    dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;

	netdev_change_features(bond_dev);
}

static void bond_setup_by_slave(struct net_device *bond_dev,
				struct net_device *slave_dev)
{
	bond_dev->header_ops	    = slave_dev->header_ops;

	bond_dev->type		    = slave_dev->type;
	bond_dev->hard_header_len   = slave_dev->hard_header_len;
	bond_dev->addr_len	    = slave_dev->addr_len;

	memcpy(bond_dev->broadcast, slave_dev->broadcast,
		slave_dev->addr_len);
}

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for alb non-mcast/bcast.
 */
static bool bond_should_deliver_exact_match(struct sk_buff *skb,
					    struct slave *slave,
					    struct bonding *bond)
{
	if (bond_is_slave_inactive(slave)) {
		if (BOND_MODE(bond) == BOND_MODE_ALB &&
		    skb->pkt_type != PACKET_BROADCAST &&
		    skb->pkt_type != PACKET_MULTICAST)
			return false;
		return true;
	}
	return false;
}

static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct slave *slave;
	struct bonding *bond;
	int (*recv_probe)(const struct sk_buff *, struct bonding *,
			  struct slave *);
	int ret = RX_HANDLER_ANOTHER;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	slave = bond_slave_get_rcu(skb->dev);
	bond = slave->bond;

	recv_probe = ACCESS_ONCE(bond->recv_probe);
	if (recv_probe) {
		ret = recv_probe(skb, bond, slave);
		if (ret == RX_HANDLER_CONSUMED) {
			consume_skb(skb);
			return ret;
		}
	}

	if (bond_should_deliver_exact_match(skb, slave, bond))
		return RX_HANDLER_EXACT;

	skb->dev = bond->dev;

	if (BOND_MODE(bond) == BOND_MODE_ALB &&
	    bond->dev->priv_flags & IFF_BRIDGE_PORT &&
	    skb->pkt_type == PACKET_HOST) {

		if (unlikely(skb_cow_head(skb,
					  skb->data - skb_mac_header(skb)))) {
			kfree_skb(skb);
			return RX_HANDLER_CONSUMED;
		}
		ether_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr);
	}

	return ret;
}

static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
		return NETDEV_LAG_TX_TYPE_ROUNDROBIN;
	case BOND_MODE_ACTIVEBACKUP:
		return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP;
	case BOND_MODE_BROADCAST:
		return NETDEV_LAG_TX_TYPE_BROADCAST;
	case BOND_MODE_XOR:
	case BOND_MODE_8023AD:
		return NETDEV_LAG_TX_TYPE_HASH;
	default:
		return NETDEV_LAG_TX_TYPE_UNKNOWN;
	}
}

static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave)
{
	struct netdev_lag_upper_info lag_upper_info;
	int err;

	lag_upper_info.tx_type = bond_lag_tx_type(bond);
	err = netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
					   &lag_upper_info);
	if (err)
		return err;
	rtmsg_ifinfo(RTM_NEWLINK, slave->dev, IFF_SLAVE, GFP_KERNEL);
	return 0;
}

static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
{
	netdev_upper_dev_unlink(slave->dev, bond->dev);
	slave->dev->flags &= ~IFF_SLAVE;
	rtmsg_ifinfo(RTM_NEWLINK, slave->dev, IFF_SLAVE, GFP_KERNEL);
}

static struct slave *bond_alloc_slave(struct bonding *bond)
{
	struct slave *slave = NULL;

	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	if (!slave)
		return NULL;

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
					       GFP_KERNEL);
		if (!SLAVE_AD_INFO(slave)) {
			kfree(slave);
			return NULL;
		}
	}
	return slave;
}

static void bond_free_slave(struct slave *slave)
{
	struct bonding *bond = bond_get_bond_by_slave(slave);

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		kfree(SLAVE_AD_INFO(slave));

	kfree(slave);
}

static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
{
	info->bond_mode = BOND_MODE(bond);
	info->miimon = bond->params.miimon;
	info->num_slaves = bond->slave_cnt;
}

static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
{
	strcpy(info->slave_name, slave->dev->name);
	info->link = slave->link;
	info->state = bond_slave_state(slave);
	info->link_failure_count = slave->link_failure_count;
}

static void bond_netdev_notify(struct net_device *dev,
			       struct netdev_bonding_info *info)
{
	rtnl_lock();
	netdev_bonding_info_change(dev, info);
	rtnl_unlock();
}

static void bond_netdev_notify_work(struct work_struct *_work)
{
	struct netdev_notify_work *w =
		container_of(_work, struct netdev_notify_work, work.work);

	bond_netdev_notify(w->dev, &w->bonding_info);
	dev_put(w->dev);
	kfree(w);
}

void bond_queue_slave_event(struct slave *slave)
{
	struct bonding *bond = slave->bond;
	struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);

	if (!nnw)
		return;

	dev_hold(slave->dev);
	nnw->dev = slave->dev;
	bond_fill_ifslave(slave, &nnw->bonding_info.slave);
	bond_fill_ifbond(bond, &nnw->bonding_info.master);
	INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);

	queue_delayed_work(slave->bond->wq, &nnw->work, 0);
}

void bond_lower_state_changed(struct slave *slave)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = slave->link == BOND_LINK_UP ||
		       slave->link == BOND_LINK_FAIL;
	info.tx_enabled = bond_is_active_slave(slave);
	netdev_lower_state_changed(slave->dev, &info);
}

/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	struct slave *new_slave = NULL, *prev_slave;
	struct sockaddr addr;
	int link_reporting;
	int res = 0, i;

	if (!bond->params.use_carrier &&
	    slave_dev->ethtool_ops->get_link == NULL &&
	    slave_ops->ndo_do_ioctl == NULL) {
		netdev_warn(bond_dev, "no link monitoring support for %s\n",
			    slave_dev->name);
	}

	/* already enslaved */
	if (slave_dev->flags & IFF_SLAVE) {
		netdev_dbg(bond_dev, "Error: Device was already enslaved\n");
		return -EBUSY;
	}

	if (bond_dev == slave_dev) {
		netdev_err(bond_dev, "cannot enslave bond to itself.\n");
		return -EPERM;
	}

	/* vlan challenged mutual exclusion */
	/* no need to lock since we're protected by rtnl_lock */
	if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
		netdev_dbg(bond_dev, "%s is NETIF_F_VLAN_CHALLENGED\n",
			   slave_dev->name);
		if (vlan_uses_dev(bond_dev)) {
			netdev_err(bond_dev, "Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
				   slave_dev->name, bond_dev->name);
			return -EPERM;
		} else {
			netdev_warn(bond_dev, "enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
				    slave_dev->name, slave_dev->name,
				    bond_dev->name);
		}
	} else {
		netdev_dbg(bond_dev, "%s is !NETIF_F_VLAN_CHALLENGED\n",
			   slave_dev->name);
	}

	/* Old ifenslave binaries are no longer supported.  These can
	 * be identified with moderate accuracy by the state of the slave:
	 * the current ifenslave will set the interface down prior to
	 * enslaving it; the old ifenslave will not.
	 */
	if (slave_dev->flags & IFF_UP) {
		netdev_err(bond_dev, "%s is up - this may be due to an out of date ifenslave\n",
			   slave_dev->name);
		return -EPERM;
	}

	/* set bonding device ether type by slave - bonding netdevices are
	 * created with ether_setup, so when the slave type is not ARPHRD_ETHER
	 * there is a need to override some of the type dependent attribs/funcs.
	 *
	 * bond ether type mutual exclusion - don't allow slaves of dissimilar
	 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
	 */
	if (!bond_has_slaves(bond)) {
		if (bond_dev->type != slave_dev->type) {
			netdev_dbg(bond_dev, "change device type from %d to %d\n",
				   bond_dev->type, slave_dev->type);

			res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
						       bond_dev);
			res = notifier_to_errno(res);
			if (res) {
				netdev_err(bond_dev, "refused to change device type\n");
				return -EBUSY;
			}

			/* Flush unicast and multicast addresses */
			dev_uc_flush(bond_dev);
			dev_mc_flush(bond_dev);

			if (slave_dev->type != ARPHRD_ETHER)
				bond_setup_by_slave(bond_dev, slave_dev);
			else {
				ether_setup(bond_dev);
				bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
			}

			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
						 bond_dev);
		}
	} else if (bond_dev->type != slave_dev->type) {
		netdev_err(bond_dev, "%s ether type (%d) is different from other slaves (%d), can not enslave it\n",
			   slave_dev->name, slave_dev->type, bond_dev->type);
		return -EINVAL;
	}

	if (slave_ops->ndo_set_mac_address == NULL) {
		netdev_warn(bond_dev, "The slave device specified does not support setting the MAC address\n");
		if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
		    bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
			if (!bond_has_slaves(bond)) {
				bond->params.fail_over_mac = BOND_FOM_ACTIVE;
				netdev_warn(bond_dev, "Setting fail_over_mac to active for active-backup mode\n");
			} else {
				netdev_err(bond_dev, "The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n");
				res = -EOPNOTSUPP;
				goto err_undo_flags;
			}
		}
	}

	call_netdevice_notifiers(NETDEV_JOIN, slave_dev);

	/* If this is the first slave, then we need to set the master's hardware
	 * address to be the same as the slave's.
	 */
	if (!bond_has_slaves(bond) &&
	    bond->dev->addr_assign_type == NET_ADDR_RANDOM)
		bond_set_dev_addr(bond->dev, slave_dev);

	new_slave = bond_alloc_slave(bond);
	if (!new_slave) {
		res = -ENOMEM;
		goto err_undo_flags;
	}

	new_slave->bond = bond;
	new_slave->dev = slave_dev;
	/* Set the new_slave's queue_id to be zero.  Queue ID mapping
	 * is set via sysfs or module option if desired.
	 */
	new_slave->queue_id = 0;

	/* Save slave's original mtu and then set it to match the bond */
	new_slave->original_mtu = slave_dev->mtu;
	res = dev_set_mtu(slave_dev, bond->dev->mtu);
	if (res) {
		netdev_dbg(bond_dev, "Error %d calling dev_set_mtu\n", res);
		goto err_free;
	}

	/* Save slave's original ("permanent") mac address for modes
	 * that need it, and for restoring it upon release, and then
	 * set it to the master's address
	 */
	ether_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr);

	if (!bond->params.fail_over_mac ||
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		/* Set slave to master's mac address.  The application already
		 * set the master's mac address to that of the first slave
		 */
		memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
		addr.sa_family = slave_dev->type;
		res = dev_set_mac_address(slave_dev, &addr);
		if (res) {
			netdev_dbg(bond_dev, "Error %d calling set_mac_address\n", res);
			goto err_restore_mtu;
		}
	}

	/* set slave flag before open to prevent IPv6 addrconf */
	slave_dev->flags |= IFF_SLAVE;

	/* open the slave since the application closed it */
	res = dev_open(slave_dev);
	if (res) {
		netdev_dbg(bond_dev, "Opening slave %s failed\n", slave_dev->name);
		goto err_restore_mac;
	}

	slave_dev->priv_flags |= IFF_BONDING;
	/* initialize slave stats */
	dev_get_stats(new_slave->dev, &new_slave->slave_stats);

	if (bond_is_lb(bond)) {
		/* bond_alb_init_slave() must be called before all other stages since
		 * it might fail and we do not want to have to undo everything
		 */
		res = bond_alb_init_slave(bond, new_slave);
		if (res)
			goto err_close;
	}

	/* If the mode uses primary, then the following is handled by
	 * bond_change_active_slave().
	 */
	if (!bond_uses_primary(bond)) {
		/* set promiscuity level to new slave */
		if (bond_dev->flags & IFF_PROMISC) {
			res = dev_set_promiscuity(slave_dev, 1);
			if (res)
				goto err_close;
		}

		/* set allmulti level to new slave */
		if (bond_dev->flags & IFF_ALLMULTI) {
			res = dev_set_allmulti(slave_dev, 1);
			if (res)
				goto err_close;
		}

		netif_addr_lock_bh(bond_dev);

		dev_mc_sync_multiple(slave_dev, bond_dev);
		dev_uc_sync_multiple(slave_dev, bond_dev);

		netif_addr_unlock_bh(bond_dev);
	}

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		/* add lacpdu mc addr to mc list */
		u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;

		dev_mc_add(slave_dev, lacpdu_multicast);
	}

	res = vlan_vids_add_by_dev(slave_dev, bond_dev);
	if (res) {
		netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
			   slave_dev->name);
		goto err_close;
	}

	prev_slave = bond_last_slave(bond);

	new_slave->delay = 0;
	new_slave->link_failure_count = 0;

	bond_update_speed_duplex(new_slave);

	new_slave->last_rx = jiffies -
		(msecs_to_jiffies(bond->params.arp_interval) + 1);
	for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
		new_slave->target_last_arp_rx[i] = new_slave->last_rx;

	if (bond->params.miimon && !bond->params.use_carrier) {
		link_reporting = bond_check_dev_link(bond, slave_dev, 1);

		if ((link_reporting == -1) && !bond->params.arp_interval) {
			/* miimon is set but a bonded network driver
			 * does not support ETHTOOL/MII and
			 * arp_interval is not set.  Note: if
			 * use_carrier is enabled, we will never go
			 * here (because netif_carrier is always
			 * supported); thus, we don't need to change
			 * the messages for netif_carrier.
			 */
			netdev_warn(bond_dev, "MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n",
				    slave_dev->name);
		} else if (link_reporting == -1) {
			/* unable get link status using mii/ethtool */
			netdev_warn(bond_dev, "can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n",
				    slave_dev->name);
		}
	}

	/* check for initial state */
	if (bond->params.miimon) {
		if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
			if (bond->params.updelay) {
				bond_set_slave_link_state(new_slave,
							  BOND_LINK_BACK,
							  BOND_SLAVE_NOTIFY_NOW);
				new_slave->delay = bond->params.updelay;
			} else {
				bond_set_slave_link_state(new_slave,
							  BOND_LINK_UP,
							  BOND_SLAVE_NOTIFY_NOW);
			}
		} else {
			bond_set_slave_link_state(new_slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_NOW);
		}
	} else if (bond->params.arp_interval) {
		bond_set_slave_link_state(new_slave,
					  (netif_carrier_ok(slave_dev) ?
					  BOND_LINK_UP : BOND_LINK_DOWN),
					  BOND_SLAVE_NOTIFY_NOW);
	} else {
		bond_set_slave_link_state(new_slave, BOND_LINK_UP,
					  BOND_SLAVE_NOTIFY_NOW);
	}

	if (new_slave->link != BOND_LINK_DOWN)
		new_slave->last_link_up = jiffies;
	netdev_dbg(bond_dev, "Initial state of slave_dev is BOND_LINK_%s\n",
		   new_slave->link == BOND_LINK_DOWN ? "DOWN" :
		   (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));

	if (bond_uses_primary(bond) && bond->params.primary[0]) {
		/* if there is a primary slave, remember it */
		if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
			rcu_assign_pointer(bond->primary_slave, new_slave);
			bond->force_primary = true;
		}
	}

	switch (BOND_MODE(bond)) {
	case BOND_MODE_ACTIVEBACKUP:
		bond_set_slave_inactive_flags(new_slave,
					      BOND_SLAVE_NOTIFY_NOW);
		break;
	case BOND_MODE_8023AD:
		/* in 802.3ad mode, the internal mechanism
		 * will activate the slaves in the selected
		 * aggregator
		 */
		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
		/* if this is the first slave */
		if (!prev_slave) {
			SLAVE_AD_INFO(new_slave)->id = 1;
			/* Initialize AD with the number of times that the AD timer is called in 1 second;
			 * can be called only after the mac address of the bond is set
			 */
			bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
		} else {
			SLAVE_AD_INFO(new_slave)->id =
				SLAVE_AD_INFO(prev_slave)->id + 1;
		}

		bond_3ad_bind_slave(new_slave);
		break;
	case BOND_MODE_TLB:
	case BOND_MODE_ALB:
		bond_set_active_slave(new_slave);
		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
		break;
	default:
		netdev_dbg(bond_dev, "This slave is always active in trunk mode\n");

		/* always active in trunk mode */
		bond_set_active_slave(new_slave);

		/* In trunking mode there is little meaning to curr_active_slave
		 * anyway (it holds no special properties of the bond device),
		 * so we can change it without calling change_active_interface()
		 */
		if (!rcu_access_pointer(bond->curr_active_slave) &&
		    new_slave->link == BOND_LINK_UP)
			rcu_assign_pointer(bond->curr_active_slave, new_slave);

		break;
	} /* switch(bond_mode) */

#ifdef CONFIG_NET_POLL_CONTROLLER
	slave_dev->npinfo = bond->dev->npinfo;
	if (slave_dev->npinfo) {
		if (slave_enable_netpoll(new_slave)) {
			netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
			res = -EBUSY;
			goto err_detach;
		}
	}
#endif

	if (!(bond_dev->features & NETIF_F_LRO))
		dev_disable_lro(slave_dev);

	res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
					 new_slave);
	if (res) {
		netdev_dbg(bond_dev, "Error %d calling netdev_rx_handler_register\n", res);
		goto err_detach;
	}

	res = bond_master_upper_dev_link(bond, new_slave);
	if (res) {
		netdev_dbg(bond_dev, "Error %d calling bond_master_upper_dev_link\n", res);
		goto err_unregister;
	}

	res = bond_sysfs_slave_add(new_slave);
	if (res) {
		netdev_dbg(bond_dev, "Error %d calling bond_sysfs_slave_add\n", res);
		goto err_upper_unlink;
	}

	bond->slave_cnt++;
	bond_compute_features(bond);
	bond_set_carrier(bond);

	if (bond_uses_primary(bond)) {
		block_netpoll_tx();
		bond_select_active_slave(bond);
		unblock_netpoll_tx();
	}

	if (bond_mode_uses_xmit_hash(bond))
		bond_update_slave_arr(bond, NULL);

	netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
		    slave_dev->name,
		    bond_is_active_slave(new_slave) ? "an active" : "a backup",
		    new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");

	/* enslave is successful */
	bond_queue_slave_event(new_slave);
	return 0;

/* Undo stages on error */
err_upper_unlink:
	bond_upper_dev_unlink(bond, new_slave);

err_unregister:
	netdev_rx_handler_unregister(slave_dev);

err_detach:
	if (!bond_uses_primary(bond))
		bond_hw_addr_flush(bond_dev, slave_dev);

	vlan_vids_del_by_dev(slave_dev, bond_dev);
	if (rcu_access_pointer(bond->primary_slave) == new_slave)
		RCU_INIT_POINTER(bond->primary_slave, NULL);
	if (rcu_access_pointer(bond->curr_active_slave) == new_slave) {
		block_netpoll_tx();
		bond_change_active_slave(bond, NULL);
		bond_select_active_slave(bond);
		unblock_netpoll_tx();
	}
	/* either primary_slave or curr_active_slave might've changed */
	synchronize_rcu();
	slave_disable_netpoll(new_slave);

err_close:
	slave_dev->priv_flags &= ~IFF_BONDING;
	dev_close(slave_dev);

err_restore_mac:
	slave_dev->flags &= ~IFF_SLAVE;
	if (!bond->params.fail_over_mac ||
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		/* XXX TODO - fom follow mode needs to change master's
		 * MAC if this slave's MAC is in use by the bond, or at
		 * least print a warning.
		 */
		ether_addr_copy(addr.sa_data, new_slave->perm_hwaddr);
		addr.sa_family = slave_dev->type;
		dev_set_mac_address(slave_dev, &addr);
	}

err_restore_mtu:
	dev_set_mtu(slave_dev, new_slave->original_mtu);

err_free:
	bond_free_slave(new_slave);

err_undo_flags:
	/* Enslave of first slave has failed and we need to fix master's mac */
	if (!bond_has_slaves(bond)) {
		if (ether_addr_equal_64bits(bond_dev->dev_addr,
					    slave_dev->dev_addr))
			eth_hw_addr_random(bond_dev);
		if (bond_dev->type != ARPHRD_ETHER) {
			dev_close(bond_dev);
			ether_setup(bond_dev);
			bond_dev->flags |= IFF_MASTER;
			bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		}
	}

	return res;
}

/* Try to release the slave device <slave> from the bond device <master>.
 * It is legal to access curr_active_slave without a lock because the whole
 * function is RTNL-locked. If "all" is true it means that the function is
 * being called while destroying a bond interface and all slaves are being
 * released.
 *
 * The rules for slave state should be:
 *   for Active/Backup:
 *     Active stays on, all backups go down
 *   for Bonded connections:
 *     The first up interface should be left on and all others downed.
 */
static int __bond_release_one(struct net_device *bond_dev,
			      struct net_device *slave_dev,
			      bool all)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *oldcurrent;
	struct sockaddr addr;
	int old_flags = bond_dev->flags;
	netdev_features_t old_features = bond_dev->features;

	/* slave is not a slave or master is not master of this slave */
	if (!(slave_dev->flags & IFF_SLAVE) ||
	    !netdev_has_upper_dev(slave_dev, bond_dev)) {
		netdev_dbg(bond_dev, "cannot release %s\n",
			   slave_dev->name);
		return -EINVAL;
	}

	block_netpoll_tx();

	slave = bond_get_slave_by_dev(bond, slave_dev);
	if (!slave) {
		/* not a slave of this bond */
		netdev_info(bond_dev, "%s not enslaved\n",
			    slave_dev->name);
		unblock_netpoll_tx();
		return -EINVAL;
	}

	bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);

	bond_sysfs_slave_del(slave);

	/* recompute stats just before removing the slave */
	bond_get_stats(bond->dev, &bond->bond_stats);

	bond_upper_dev_unlink(bond, slave);
	/* unregister rx_handler early so bond_handle_frame wouldn't be called
	 * for this slave anymore.
	 */
	netdev_rx_handler_unregister(slave_dev);

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		bond_3ad_unbind_slave(slave);

	if (bond_mode_uses_xmit_hash(bond))
		bond_update_slave_arr(bond, slave);

	netdev_info(bond_dev, "Releasing %s interface %s\n",
		    bond_is_active_slave(slave) ? "active" : "backup",
		    slave_dev->name);

	oldcurrent = rcu_access_pointer(bond->curr_active_slave);

	RCU_INIT_POINTER(bond->current_arp_slave, NULL);

	if (!all && (!bond->params.fail_over_mac ||
		     BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
		if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
		    bond_has_slaves(bond))
			netdev_warn(bond_dev, "the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
				    slave_dev->name, slave->perm_hwaddr,
				    bond_dev->name, slave_dev->name);
	}

	if (rtnl_dereference(bond->primary_slave) == slave)
		RCU_INIT_POINTER(bond->primary_slave, NULL);

	if (oldcurrent == slave)
		bond_change_active_slave(bond, NULL);

	if (bond_is_lb(bond)) {
		/* Must be called only after the slave has been
		 * detached from the list and the curr_active_slave
		 * has been cleared (if our_slave == old_current),
		 * but before a new active slave is selected.
		 */
		bond_alb_deinit_slave(bond, slave);
	}

	if (all) {
		RCU_INIT_POINTER(bond->curr_active_slave, NULL);
	} else if (oldcurrent == slave) {
		/* Note that we hold RTNL over this sequence, so there
		 * is no concern that another slave add/remove event
		 * will interfere.
		 */
		bond_select_active_slave(bond);
	}

	if (!bond_has_slaves(bond)) {
		bond_set_carrier(bond);
		eth_hw_addr_random(bond_dev);
	}

	unblock_netpoll_tx();
	synchronize_rcu();
	bond->slave_cnt--;

	if (!bond_has_slaves(bond)) {
		call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
		call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
	}

	bond_compute_features(bond);
	if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
	    (old_features & NETIF_F_VLAN_CHALLENGED))
		netdev_info(bond_dev, "last VLAN challenged slave %s left bond %s - VLAN blocking is removed\n",
			    slave_dev->name, bond_dev->name);

	vlan_vids_del_by_dev(slave_dev, bond_dev);

	/* If the mode uses primary, then this case was handled above by
	 * bond_change_active_slave(..., NULL)
	 */
	if (!bond_uses_primary(bond)) {
		/* unset promiscuity level from slave
		 * NOTE: The NETDEV_CHANGEADDR call above may change the value
		 * of the IFF_PROMISC flag in the bond_dev, but we need the
		 * value of that flag before that change, as that was the value
		 * when this slave was attached, so we cache at the start of the
		 * function and use it here. Same goes for ALLMULTI below
		 */
		if (old_flags & IFF_PROMISC)
			dev_set_promiscuity(slave_dev, -1);

		/* unset allmulti level from slave */
		if (old_flags & IFF_ALLMULTI)
			dev_set_allmulti(slave_dev, -1);

		bond_hw_addr_flush(bond_dev, slave_dev);
	}

	slave_disable_netpoll(slave);

	/* close slave before restoring its mac address */
	dev_close(slave_dev);

	if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		/* restore original ("permanent") mac address */
		ether_addr_copy(addr.sa_data, slave->perm_hwaddr);
		addr.sa_family = slave_dev->type;
		dev_set_mac_address(slave_dev, &addr);
	}

	dev_set_mtu(slave_dev, slave->original_mtu);

	slave_dev->priv_flags &= ~IFF_BONDING;

	bond_free_slave(slave);

	return 0;
}

/* A wrapper used because of ndo_del_link */
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
{
	return __bond_release_one(bond_dev, slave_dev, false);
}

/* First release a slave and then destroy the bond if no more slaves are left.
 * Must be under rtnl_lock when this function is called.
 */
static int bond_release_and_destroy(struct net_device *bond_dev,
				    struct net_device *slave_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	int ret;

	ret = bond_release(bond_dev, slave_dev);
	if (ret == 0 && !bond_has_slaves(bond)) {
		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
		netdev_info(bond_dev, "Destroying bond %s\n",
			    bond_dev->name);
		bond_remove_proc_entry(bond);
		unregister_netdevice(bond_dev);
	}
	return ret;
}

static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
{
	struct bonding *bond = netdev_priv(bond_dev);
	bond_fill_ifbond(bond, info);
	return 0;
}

static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	int i = 0, res = -ENODEV;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter) {
		if (i++ == (int)info->slave_id) {
			res = 0;
			bond_fill_ifslave(slave, info);
			break;
		}
	}

	return res;
}

/*-------------------------------- Monitoring -------------------------------*/

/* called with rcu_read_lock() */
static int bond_miimon_inspect(struct bonding *bond)
{
	int link_state, commit = 0;
	struct list_head *iter;
	struct slave *slave;
	bool ignore_updelay;

	ignore_updelay = !rcu_dereference(bond->curr_active_slave);

	bond_for_each_slave_rcu(bond, slave, iter) {
		slave->new_link = BOND_LINK_NOCHANGE;

		link_state = bond_check_dev_link(bond, slave->dev, 0);

		switch (slave->link) {
		case BOND_LINK_UP:
			if (link_state)
				continue;

			bond_set_slave_link_state(slave, BOND_LINK_FAIL,
						  BOND_SLAVE_NOTIFY_LATER);
			slave->delay = bond->params.downdelay;
			if (slave->delay) {
				netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
					    (BOND_MODE(bond) ==
					     BOND_MODE_ACTIVEBACKUP) ?
					     (bond_is_active_slave(slave) ?
					      "active " : "backup ") : "",
					    slave->dev->name,
					    bond->params.downdelay * bond->params.miimon);
			}
			/*FALLTHRU*/
		case BOND_LINK_FAIL:
			if (link_state) {
				/* recovered before downdelay expired */
				bond_set_slave_link_state(slave, BOND_LINK_UP,
							  BOND_SLAVE_NOTIFY_LATER);
				slave->last_link_up = jiffies;
				netdev_info(bond->dev, "link status up again after %d ms for interface %s\n",
					    (bond->params.downdelay - slave->delay) *
					    bond->params.miimon,
					    slave->dev->name);
				continue;
			}

			if (slave->delay <= 0) {
				slave->new_link = BOND_LINK_DOWN;
				commit++;
				continue;
			}

			slave->delay--;
			break;

		case BOND_LINK_DOWN:
			if (!link_state)
				continue;

			bond_set_slave_link_state(slave, BOND_LINK_BACK,
						  BOND_SLAVE_NOTIFY_LATER);
			slave->delay = bond->params.updelay;

			if (slave->delay) {
				netdev_info(bond->dev, "link status up for interface %s, enabling it in %d ms\n",
					    slave->dev->name,
					    ignore_updelay ? 0 :
					    bond->params.updelay *
					    bond->params.miimon);
			}
			/*FALLTHRU*/
		case BOND_LINK_BACK:
			if (!link_state) {
				bond_set_slave_link_state(slave,
							  BOND_LINK_DOWN,
							  BOND_SLAVE_NOTIFY_LATER);
				netdev_info(bond->dev, "link status down again after %d ms for interface %s\n",
					    (bond->params.updelay - slave->delay) *
					    bond->params.miimon,
					    slave->dev->name);

				continue;
			}

			if (ignore_updelay)
				slave->delay = 0;

			if (slave->delay <= 0) {
				slave->new_link = BOND_LINK_UP;
				commit++;
				ignore_updelay = false;
				continue;
			}

			slave->delay--;
			break;
		}
	}

	return commit;
}
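
/* bond_miimon_inspect() runs under rcu_read_lock and therefore only records
 * proposed up/down transitions in slave->new_link and defers all slave
 * notifications (BOND_SLAVE_NOTIFY_LATER); the transitions are applied by
 * bond_miimon_commit() below, which the caller invokes under RTNL.
 */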

static void bond_miimon_commit(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave, *primary;

	bond_for_each_slave(bond, slave, iter) {
		switch (slave->new_link) {
		case BOND_LINK_NOCHANGE:
			continue;

		case BOND_LINK_UP:
			bond_update_speed_duplex(slave);
			bond_set_slave_link_state(slave, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);
			slave->last_link_up = jiffies;

			primary = rtnl_dereference(bond->primary_slave);
			if (BOND_MODE(bond) == BOND_MODE_8023AD) {
				/* prevent it from being the active one */
				bond_set_backup_slave(slave);
			} else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
				/* make it immediately active */
				bond_set_active_slave(slave);
			} else if (slave != primary) {
				/* prevent it from being the active one */
				bond_set_backup_slave(slave);
			}

			netdev_info(bond->dev, "link status definitely up for interface %s, %u Mbps %s duplex\n",
				    slave->dev->name,
				    slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
				    slave->duplex ? "full" : "half");

			/* notify ad that the link status has changed */
			if (BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_3ad_handle_link_change(slave, BOND_LINK_UP);

			if (bond_is_lb(bond))
				bond_alb_handle_link_change(bond, slave,
							    BOND_LINK_UP);

			if (BOND_MODE(bond) == BOND_MODE_XOR)
				bond_update_slave_arr(bond, NULL);

			if (!bond->curr_active_slave || slave == primary)
				goto do_failover;

			continue;

		case BOND_LINK_DOWN:
			if (slave->link_failure_count < UINT_MAX)
				slave->link_failure_count++;

			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_NOW);

			if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
			    BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_set_slave_inactive_flags(slave,
							      BOND_SLAVE_NOTIFY_NOW);

			netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n",
				    slave->dev->name);

			if (BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_3ad_handle_link_change(slave,
							    BOND_LINK_DOWN);

			if (bond_is_lb(bond))
				bond_alb_handle_link_change(bond, slave,
							    BOND_LINK_DOWN);

			if (BOND_MODE(bond) == BOND_MODE_XOR)
				bond_update_slave_arr(bond, NULL);

			if (slave == rcu_access_pointer(bond->curr_active_slave))
				goto do_failover;

			continue;

		default:
			netdev_err(bond->dev, "invalid new link %d on slave %s\n",
				   slave->new_link, slave->dev->name);
			slave->new_link = BOND_LINK_NOCHANGE;

			continue;
		}

do_failover:
		block_netpoll_tx();
		bond_select_active_slave(bond);
		unblock_netpoll_tx();
	}

	bond_set_carrier(bond);
}

/* bond_mii_monitor
 *
 * Really a wrapper that splits the mii monitor into two phases: an
 * inspection, then (if inspection indicates something needs to be done)
 * an acquisition of appropriate locks followed by a commit phase to
 * implement whatever link state changes are indicated.
 */
static void bond_mii_monitor(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mii_work.work);
	bool should_notify_peers = false;
	unsigned long delay;

	delay = msecs_to_jiffies(bond->params.miimon);

	if (!bond_has_slaves(bond))
		goto re_arm;

	rcu_read_lock();

	should_notify_peers = bond_should_notify_peers(bond);

	if (bond_miimon_inspect(bond)) {
		rcu_read_unlock();

		/* Race avoidance with bond_close cancel of workqueue */
		if (!rtnl_trylock()) {
			delay = 1;
			should_notify_peers = false;
			goto re_arm;
		}

		bond_miimon_commit(bond);

		rtnl_unlock();	/* might sleep, hold no other locks */
	} else
		rcu_read_unlock();

re_arm:
	if (bond->params.miimon)
		queue_delayed_work(bond->wq, &bond->mii_work, delay);

	if (should_notify_peers) {
		if (!rtnl_trylock())
			return;
		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
		rtnl_unlock();
	}
}

static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
{
	struct net_device *upper;
	struct list_head *iter;
	bool ret = false;

	if (ip == bond_confirm_addr(bond->dev, 0, ip))
		return true;

	rcu_read_lock();
	netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
		if (ip == bond_confirm_addr(upper, 0, ip)) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

/* We go to the (large) trouble of VLAN tagging ARP frames because
 * switches in VLAN mode (especially if ports are configured as
 * "native" to a VLAN) might not pass non-tagged frames.
 */
static void bond_arp_send(struct net_device *slave_dev, int arp_op,
			  __be32 dest_ip, __be32 src_ip,
			  struct bond_vlan_tag *tags)
{
	struct sk_buff *skb;
	struct bond_vlan_tag *outer_tag = tags;

	netdev_dbg(slave_dev, "arp %d on slave %s: dst %pI4 src %pI4\n",
		   arp_op, slave_dev->name, &dest_ip, &src_ip);

	skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
			 NULL, slave_dev->dev_addr, NULL);

	if (!skb) {
		net_err_ratelimited("ARP packet allocation failed\n");
		return;
	}

	if (!tags || tags->vlan_proto == VLAN_N_VID)
		goto xmit;

	tags++;

	/* Go through all the tags backwards and add them to the packet */
	while (tags->vlan_proto != VLAN_N_VID) {
		if (!tags->vlan_id) {
			tags++;
			continue;
		}

		netdev_dbg(slave_dev, "inner tag: proto %X vid %X\n",
			   ntohs(outer_tag->vlan_proto), tags->vlan_id);
		skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
						tags->vlan_id);
		if (!skb) {
			net_err_ratelimited("failed to insert inner VLAN tag\n");
			return;
		}

		tags++;
	}
	/* Set the outer tag */
	if (outer_tag->vlan_id) {
		netdev_dbg(slave_dev, "outer tag: proto %X vid %X\n",
			   ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
		__vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
				       outer_tag->vlan_id);
	}

xmit:
	arp_xmit(skb);
}
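
/* The tags array consumed above (and built by bond_verify_device_path() below)
 * starts with the tag closest to the bond, i.e. the outermost tag on the wire,
 * and is terminated by an entry whose vlan_proto is VLAN_N_VID.  Entries with a
 * zero vlan_id are skipped; the outermost tag is handed to the hardware via
 * __vlan_hwaccel_put_tag() while any inner tags are inserted into the packet.
 */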

/* Validate the device path between the @start_dev and the @end_dev.
 * The path is valid if the @end_dev is reachable through device
 * stacking.
 * When the path is validated, collect any vlan information in the
 * path.
 */
struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
					      struct net_device *end_dev,
					      int level)
{
	struct bond_vlan_tag *tags;
	struct net_device *upper;
	struct list_head  *iter;

	if (start_dev == end_dev) {
		tags = kzalloc(sizeof(*tags) * (level + 1), GFP_ATOMIC);
		if (!tags)
			return ERR_PTR(-ENOMEM);
		tags[level].vlan_proto = VLAN_N_VID;
		return tags;
	}

	netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
		tags = bond_verify_device_path(upper, end_dev, level + 1);
		if (IS_ERR_OR_NULL(tags)) {
			if (IS_ERR(tags))
				return tags;
			continue;
		}
		if (is_vlan_dev(upper)) {
			tags[level].vlan_proto = vlan_dev_vlan_proto(upper);
			tags[level].vlan_id = vlan_dev_vlan_id(upper);
		}

		return tags;
	}

	return NULL;
}
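
/* For example, when the target is reached through a single VLAN device
 * stacked on the bond (bond0 -> bond0.100), the recursion above returns a
 * two-entry array: tags[0] carries the 802.1Q tag with vlan_id 100 and
 * tags[1] is the VLAN_N_VID terminator.
 */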

static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
{
	struct rtable *rt;
	struct bond_vlan_tag *tags;
	__be32 *targets = bond->params.arp_targets, addr;
	int i;

	for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
		netdev_dbg(bond->dev, "basa: target %pI4\n", &targets[i]);
		tags = NULL;

		/* Find out through which dev the packet should go */
		rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
				     RTO_ONLINK, 0);
		if (IS_ERR(rt)) {
			/* there's no route to target - try to send arp
			 * probe to generate any traffic (arp_validate=0)
			 */
			if (bond->params.arp_validate)
				net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
						     bond->dev->name,
						     &targets[i]);
			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
				      0, tags);
			continue;
		}

		/* bond device itself */
		if (rt->dst.dev == bond->dev)
			goto found;

		rcu_read_lock();
		tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0);
		rcu_read_unlock();

		if (!IS_ERR_OR_NULL(tags))
			goto found;

		/* Not our device - skip */
		netdev_dbg(bond->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
			   &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");

		ip_rt_put(rt);
		continue;

found:
		addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
		ip_rt_put(rt);
		bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
			      addr, tags);
		kfree(tags);
	}
}

static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
{
	int i;

	if (!sip || !bond_has_this_ip(bond, tip)) {
		netdev_dbg(bond->dev, "bva: sip %pI4 tip %pI4 not found\n",
			   &sip, &tip);
		return;
	}

	i = bond_get_targets_ip(bond->params.arp_targets, sip);
	if (i == -1) {
		netdev_dbg(bond->dev, "bva: sip %pI4 not found in targets\n",
			   &sip);
		return;
	}
	slave->last_rx = jiffies;
	slave->target_last_arp_rx[i] = jiffies;
}

int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
		 struct slave *slave)
{
	struct arphdr *arp = (struct arphdr *)skb->data;
	struct slave *curr_active_slave, *curr_arp_slave;
	unsigned char *arp_ptr;
	__be32 sip, tip;
	int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);

	if (!slave_do_arp_validate(bond, slave)) {
		if ((slave_do_arp_validate_only(bond) && is_arp) ||
		    !slave_do_arp_validate_only(bond))
			slave->last_rx = jiffies;
		return RX_HANDLER_ANOTHER;
	} else if (!is_arp) {
		return RX_HANDLER_ANOTHER;
	}

	alen = arp_hdr_len(bond->dev);

	netdev_dbg(bond->dev, "bond_arp_rcv: skb->dev %s\n",
		   skb->dev->name);

	if (alen > skb_headlen(skb)) {
		arp = kmalloc(alen, GFP_ATOMIC);
		if (!arp)
			goto out_unlock;
		if (skb_copy_bits(skb, 0, arp, alen) < 0)
			goto out_unlock;
	}

	if (arp->ar_hln != bond->dev->addr_len ||
	    skb->pkt_type == PACKET_OTHERHOST ||
	    skb->pkt_type == PACKET_LOOPBACK ||
	    arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_pln != 4)
		goto out_unlock;

	arp_ptr = (unsigned char *)(arp + 1);
	arp_ptr += bond->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4 + bond->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	netdev_dbg(bond->dev, "bond_arp_rcv: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
		   slave->dev->name, bond_slave_state(slave),
		   bond->params.arp_validate, slave_do_arp_validate(bond, slave),
		   &sip, &tip);

	curr_active_slave = rcu_dereference(bond->curr_active_slave);
	curr_arp_slave = rcu_dereference(bond->current_arp_slave);

	/* We 'trust' the received ARP enough to validate it if:
	 *
	 * (a) the slave receiving the ARP is active (which includes the
	 * current ARP slave, if any), or
	 *
	 * (b) the receiving slave isn't active, but there is a currently
	 * active slave and it received valid arp reply(s) after it became
	 * the currently active slave, or
	 *
	 * (c) there is an ARP slave that sent an ARP during the prior ARP
	 * interval, and we receive an ARP reply on any slave.  We accept
	 * these because switch FDB update delays may deliver the ARP
	 * reply to a slave other than the sender of the ARP request.
	 *
	 * Note: for (b), backup slaves are receiving the broadcast ARP
	 * request, not a reply.  This request passes from the sending
	 * slave through the L2 switch(es) to the receiving slave.  Since
	 * this is checking the request, sip/tip are swapped for
	 * validation.
	 *
	 * This is done to avoid endless looping when we can't reach the
	 * arp_ip_target and fool ourselves with our own arp requests.
	 */
	if (bond_is_active_slave(slave))
		bond_validate_arp(bond, slave, sip, tip);
	else if (curr_active_slave &&
		 time_after(slave_last_rx(bond, curr_active_slave),
			    curr_active_slave->last_link_up))
		bond_validate_arp(bond, slave, tip, sip);
	else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
		 bond_time_in_interval(bond,
				       dev_trans_start(curr_arp_slave->dev), 1))
		bond_validate_arp(bond, slave, sip, tip);

out_unlock:
	if (arp != (struct arphdr *)skb->data)
		kfree(arp);
	return RX_HANDLER_ANOTHER;
}

/* function to verify if we're in the arp_interval timeslice, returns true if
 * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
 * arp_interval/2) . the arp_interval/2 is needed for really fast networks.
 */
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
				  int mod)
{
	int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);

	return time_in_range(jiffies,
			     last_act - delta_in_ticks,
			     last_act + mod * delta_in_ticks + delta_in_ticks/2);
}
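
/* For example, with arp_interval = 1000 ms and mod = 2 the check above accepts
 * any jiffies value from one interval before last_act up to two and a half
 * intervals after it, i.e. the window [last_act - 1 s, last_act + 2.5 s].
 */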

/* This function is called regularly to monitor each slave's link
 * ensuring that traffic is being sent and received when arp monitoring
 * is used in load-balancing mode. if the adapter has been dormant, then an
 * arp is transmitted to generate traffic. see activebackup_arp_monitor for
 * arp monitoring in active backup mode.
 */
static void bond_loadbalance_arp_mon(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    arp_work.work);
	struct slave *slave, *oldcurrent;
	struct list_head *iter;
	int do_failover = 0, slave_state_changed = 0;

	if (!bond_has_slaves(bond))
		goto re_arm;

	rcu_read_lock();

	oldcurrent = rcu_dereference(bond->curr_active_slave);
	/* see if any of the previous devices are up now (i.e. they have
	 * xmt and rcv traffic). the curr_active_slave does not come into
	 * the picture unless it is null. also, slave->last_link_up is not
	 * needed here because we send an arp on each slave and give a slave
	 * as long as it needs to get the tx/rx within the delta.
	 * TODO: what about up/down delay in arp mode? it wasn't here before
	 *       so it can wait
	 */
	bond_for_each_slave_rcu(bond, slave, iter) {
		unsigned long trans_start = dev_trans_start(slave->dev);

		if (slave->link != BOND_LINK_UP) {
			if (bond_time_in_interval(bond, trans_start, 1) &&
			    bond_time_in_interval(bond, slave->last_rx, 1)) {

				slave->link  = BOND_LINK_UP;
				slave_state_changed = 1;

				/* primary_slave has no meaning in round-robin
				 * mode. the window of a slave being up and
				 * curr_active_slave being null after enslaving
				 * is closed.
				 */
				if (!oldcurrent) {
					netdev_info(bond->dev, "link status definitely up for interface %s\n",
						    slave->dev->name);
					do_failover = 1;
				} else {
					netdev_info(bond->dev, "interface %s is now up\n",
						    slave->dev->name);
				}
			}
		} else {
			/* slave->link == BOND_LINK_UP */

			/* not all switches will respond to an arp request
			 * when the source ip is 0, so don't take the link down
			 * if we don't know our ip yet
			 */
			if (!bond_time_in_interval(bond, trans_start, 2) ||
			    !bond_time_in_interval(bond, slave->last_rx, 2)) {

				slave->link  = BOND_LINK_DOWN;
				slave_state_changed = 1;

				if (slave->link_failure_count < UINT_MAX)
					slave->link_failure_count++;

				netdev_info(bond->dev, "interface %s is now down\n",
					    slave->dev->name);

				if (slave == oldcurrent)
					do_failover = 1;
			}
		}

		/* note: if switch is in round-robin mode, all links
		 * must tx arp to ensure all links rx an arp - otherwise
		 * links may oscillate or not come up at all; if switch is
		 * in something like xor mode, there is nothing we can
		 * do - all replies will be rx'ed on same link causing slaves
		 * to be unstable during low/no traffic periods
		 */
		if (bond_slave_is_up(slave))
			bond_arp_send_all(bond, slave);
	}

	rcu_read_unlock();

	if (do_failover || slave_state_changed) {
		if (!rtnl_trylock())
			goto re_arm;

		if (slave_state_changed) {
			bond_slave_state_change(bond);
			if (BOND_MODE(bond) == BOND_MODE_XOR)
				bond_update_slave_arr(bond, NULL);
		}
		if (do_failover) {
			block_netpoll_tx();
			bond_select_active_slave(bond);
			unblock_netpoll_tx();
		}
		rtnl_unlock();
	}

re_arm:
	if (bond->params.arp_interval)
		queue_delayed_work(bond->wq, &bond->arp_work,
				   msecs_to_jiffies(bond->params.arp_interval));
}

/* Called to inspect slaves for active-backup mode ARP monitor link state
 * changes.  Sets new_link in slaves to specify what action should take
 * place for the slave.  Returns 0 if no changes are found, >0 if changes
 * to link states must be committed.
 *
 * Called with rcu_read_lock held.
 */
static int bond_ab_arp_inspect(struct bonding *bond)
{
	unsigned long trans_start, last_rx;
	struct list_head *iter;
	struct slave *slave;
	int commit = 0;

	bond_for_each_slave_rcu(bond, slave, iter) {
		slave->new_link = BOND_LINK_NOCHANGE;
		last_rx = slave_last_rx(bond, slave);

		if (slave->link != BOND_LINK_UP) {
			if (bond_time_in_interval(bond, last_rx, 1)) {
				slave->new_link = BOND_LINK_UP;
				commit++;
			}
			continue;
		}

		/* Give slaves 2*delta after being enslaved or made
		 * active.  This avoids bouncing, as the last receive
		 * times need a full ARP monitor cycle to be updated.
		 */
		if (bond_time_in_interval(bond, slave->last_link_up, 2))
			continue;

		/* Backup slave is down if:
		 * - No current_arp_slave AND
		 * - more than 3*delta since last receive AND
		 * - the bond has an IP address
		 *
		 * Note: a non-null current_arp_slave indicates
		 * the curr_active_slave went down and we are
		 * searching for a new one; under this condition
		 * we only take the curr_active_slave down - this
		 * gives each slave a chance to tx/rx traffic
		 * before being taken out
		 */
		if (!bond_is_active_slave(slave) &&
		    !rcu_access_pointer(bond->current_arp_slave) &&
		    !bond_time_in_interval(bond, last_rx, 3)) {
			slave->new_link = BOND_LINK_DOWN;
			commit++;
		}

		/* Active slave is down if:
		 * - more than 2*delta since transmitting OR
		 * - (more than 2*delta since receive AND
		 *    the bond has an IP address)
		 */
		trans_start = dev_trans_start(slave->dev);
		if (bond_is_active_slave(slave) &&
		    (!bond_time_in_interval(bond, trans_start, 2) ||
		     !bond_time_in_interval(bond, last_rx, 2))) {
			slave->new_link = BOND_LINK_DOWN;
			commit++;
		}
	}

	return commit;
}

/* Called to commit link state changes noted by inspection step of
 * active-backup mode ARP monitor.
 *
 * Called with RTNL held.
 */
static void bond_ab_arp_commit(struct bonding *bond)
{
	unsigned long trans_start;
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter) {
		switch (slave->new_link) {
		case BOND_LINK_NOCHANGE:
			continue;

		case BOND_LINK_UP:
			trans_start = dev_trans_start(slave->dev);
			if (rtnl_dereference(bond->curr_active_slave) != slave ||
			    (!rtnl_dereference(bond->curr_active_slave) &&
			     bond_time_in_interval(bond, trans_start, 1))) {
				struct slave *current_arp_slave;

				current_arp_slave = rtnl_dereference(bond->current_arp_slave);
				bond_set_slave_link_state(slave, BOND_LINK_UP,
							  BOND_SLAVE_NOTIFY_NOW);
				if (current_arp_slave) {
					bond_set_slave_inactive_flags(
						current_arp_slave,
						BOND_SLAVE_NOTIFY_NOW);
					RCU_INIT_POINTER(bond->current_arp_slave, NULL);
				}

				netdev_info(bond->dev, "link status definitely up for interface %s\n",
					    slave->dev->name);

				if (!rtnl_dereference(bond->curr_active_slave) ||
				    slave == rtnl_dereference(bond->primary_slave))
					goto do_failover;

			}

			continue;

		case BOND_LINK_DOWN:
			if (slave->link_failure_count < UINT_MAX)
				slave->link_failure_count++;

			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_NOW);
			bond_set_slave_inactive_flags(slave,
						      BOND_SLAVE_NOTIFY_NOW);

			netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n",
				    slave->dev->name);

			if (slave == rtnl_dereference(bond->curr_active_slave)) {
				RCU_INIT_POINTER(bond->current_arp_slave, NULL);
				goto do_failover;
			}

			continue;

		default:
			netdev_err(bond->dev, "impossible: new_link %d on slave %s\n",
				   slave->new_link, slave->dev->name);
			continue;
		}

do_failover:
		block_netpoll_tx();
		bond_select_active_slave(bond);
		unblock_netpoll_tx();
	}

	bond_set_carrier(bond);
}

/* Send ARP probes for active-backup mode ARP monitor.
 *
 * Called with rcu_read_lock held.
 */
static bool bond_ab_arp_probe(struct bonding *bond)
{
	struct slave *slave, *before = NULL, *new_slave = NULL,
		     *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
		     *curr_active_slave = rcu_dereference(bond->curr_active_slave);
	struct list_head *iter;
	bool found = false;
	bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;

	if (curr_arp_slave && curr_active_slave)
		netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n",
			    curr_arp_slave->dev->name,
			    curr_active_slave->dev->name);

	if (curr_active_slave) {
		bond_arp_send_all(bond, curr_active_slave);
		return should_notify_rtnl;
	}

	/* if we don't have a curr_active_slave, search for the next available
	 * backup slave from the current_arp_slave and make it the candidate
	 * for becoming the curr_active_slave
	 */

	if (!curr_arp_slave) {
		curr_arp_slave = bond_first_slave_rcu(bond);
		if (!curr_arp_slave)
			return should_notify_rtnl;
	}

	bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);

	bond_for_each_slave_rcu(bond, slave, iter) {
		if (!found && !before && bond_slave_is_up(slave))
			before = slave;

		if (found && !new_slave && bond_slave_is_up(slave))
			new_slave = slave;
		/* if the link state is up at this point, we
		 * mark it down - this can happen if we have
		 * simultaneous link failures and
		 * reselect_active_interface doesn't make this
		 * one the current slave so it is still marked
		 * up when it is actually down
		 */
		if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_LATER);
			if (slave->link_failure_count < UINT_MAX)
				slave->link_failure_count++;

			bond_set_slave_inactive_flags(slave,
						      BOND_SLAVE_NOTIFY_LATER);

			netdev_info(bond->dev, "backup interface %s is now down\n",
				    slave->dev->name);
		}
		if (slave == curr_arp_slave)
			found = true;
	}

	if (!new_slave && before)
		new_slave = before;

	if (!new_slave)
		goto check_state;

	bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
				  BOND_SLAVE_NOTIFY_LATER);
	bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
	bond_arp_send_all(bond, new_slave);
	new_slave->last_link_up = jiffies;
	rcu_assign_pointer(bond->current_arp_slave, new_slave);

check_state:
	bond_for_each_slave_rcu(bond, slave, iter) {
		if (slave->should_notify || slave->should_notify_link) {
			should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
			break;
		}
	}
	return should_notify_rtnl;
}
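
/* bond_ab_arp_probe() runs under rcu_read_lock without RTNL, so every state
 * change above uses BOND_SLAVE_NOTIFY_LATER; the return value tells the
 * caller whether it must take RTNL afterwards and flush the deferred slave
 * state and link notifications.
 */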

static void bond_activebackup_arp_mon(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    arp_work.work);
	bool should_notify_peers = false;
	bool should_notify_rtnl = false;
	int delta_in_ticks;

	delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);

	if (!bond_has_slaves(bond))
		goto re_arm;

	rcu_read_lock();

	should_notify_peers = bond_should_notify_peers(bond);

	if (bond_ab_arp_inspect(bond)) {
		rcu_read_unlock();

		/* Race avoidance with bond_close flush of workqueue */
		if (!rtnl_trylock()) {
			delta_in_ticks = 1;
			should_notify_peers = false;
			goto re_arm;
		}

		bond_ab_arp_commit(bond);

		rtnl_unlock();
		rcu_read_lock();
	}

	should_notify_rtnl = bond_ab_arp_probe(bond);
	rcu_read_unlock();

re_arm:
	if (bond->params.arp_interval)
		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);

	if (should_notify_peers || should_notify_rtnl) {
		if (!rtnl_trylock())
			return;

		if (should_notify_peers)
			call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
						 bond->dev);
		if (should_notify_rtnl) {
			bond_slave_state_notify(bond);
			bond_slave_link_notify(bond);
		}

		rtnl_unlock();
	}
}

/*-------------------------- netdev event handling --------------------------*/

/* Change device name */
static int bond_event_changename(struct bonding *bond)
{
	bond_remove_proc_entry(bond);
	bond_create_proc_entry(bond);

	bond_debug_reregister(bond);

	return NOTIFY_DONE;
}

static int bond_master_netdev_event(unsigned long event,
				    struct net_device *bond_dev)
{
	struct bonding *event_bond = netdev_priv(bond_dev);

	switch (event) {
	case NETDEV_CHANGENAME:
		return bond_event_changename(event_bond);
	case NETDEV_UNREGISTER:
		bond_remove_proc_entry(event_bond);
		break;
	case NETDEV_REGISTER:
		bond_create_proc_entry(event_bond);
		break;
	case NETDEV_NOTIFY_PEERS:
		if (event_bond->send_peer_notif)
			event_bond->send_peer_notif--;
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static int bond_slave_netdev_event(unsigned long event,
				   struct net_device *slave_dev)
{
	struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary;
	struct bonding *bond;
	struct net_device *bond_dev;

	/* A netdev event can be generated while enslaving a device
	 * before netdev_rx_handler_register is called in which case
	 * slave will be NULL
	 */
	if (!slave)
		return NOTIFY_DONE;
	bond_dev = slave->bond->dev;
	bond = slave->bond;
	primary = rtnl_dereference(bond->primary_slave);

	switch (event) {
	case NETDEV_UNREGISTER:
		if (bond_dev->type != ARPHRD_ETHER)
			bond_release_and_destroy(bond_dev, slave_dev);
		else
			bond_release(bond_dev, slave_dev);
		break;
	case NETDEV_UP:
	case NETDEV_CHANGE:
		bond_update_speed_duplex(slave);
		if (BOND_MODE(bond) == BOND_MODE_8023AD)
			bond_3ad_adapter_speed_duplex_changed(slave);
		/* Fallthrough */
	case NETDEV_DOWN:
		/* Refresh slave-array if applicable!
		 * If the setup does not use miimon or arpmon (mode-specific!),
		 * then these events will not cause the slave-array to be
		 * refreshed. This will cause xmit to use a slave that is not
		 * usable. Avoid such a situation by refreshing the array at
		 * these events. If these (miimon/arpmon) parameters are
		 * configured then the array gets refreshed twice and that
		 * should be fine!
		 */
		if (bond_mode_uses_xmit_hash(bond))
			bond_update_slave_arr(bond, NULL);
		break;
	case NETDEV_CHANGEMTU:
		/* TODO: Should slaves be allowed to
		 * independently alter their MTU?  For
		 * an active-backup bond, slaves need
		 * not be the same type of device, so
		 * MTUs may vary.  For other modes,
		 * slaves arguably should have the
		 * same MTUs. To do this, we'd need to
		 * take over the slave's change_mtu
		 * function for the duration of their
		 * servitude.
		 */
		break;
	case NETDEV_CHANGENAME:
		/* we don't care if we don't have primary set */
		if (!bond_uses_primary(bond) ||
		    !bond->params.primary[0])
			break;

		if (slave == primary) {
			/* slave's name changed - it's no longer primary */
			RCU_INIT_POINTER(bond->primary_slave, NULL);
		} else if (!strcmp(slave_dev->name, bond->params.primary)) {
			/* we have a new primary slave */
			rcu_assign_pointer(bond->primary_slave, slave);
		} else { /* we didn't change primary - exit */
			break;
		}

		netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n",
			    primary ? slave_dev->name : "none");

		block_netpoll_tx();
		bond_select_active_slave(bond);
		unblock_netpoll_tx();
		break;
	case NETDEV_FEAT_CHANGE:
		bond_compute_features(bond);
		break;
	case NETDEV_RESEND_IGMP:
		/* Propagate to master device */
		call_netdevice_notifiers(event, slave->bond->dev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

/* bond_netdev_event: handle netdev notifier chain events.
 *
 * This function receives events for the netdev chain.  The caller (an
 * ioctl handler calling blocking_notifier_call_chain) holds the necessary
 * locks for us to safely manipulate the slave devices (RTNL lock,
 * dev_probe_lock).
 */
static int bond_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	netdev_dbg(event_dev, "event: %lx\n", event);

	if (!(event_dev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	if (event_dev->flags & IFF_MASTER) {
		netdev_dbg(event_dev, "IFF_MASTER\n");
		return bond_master_netdev_event(event, event_dev);
	}

	if (event_dev->flags & IFF_SLAVE) {
		netdev_dbg(event_dev, "IFF_SLAVE\n");
		return bond_slave_netdev_event(event, event_dev);
	}

	return NOTIFY_DONE;
}

static struct notifier_block bond_netdev_notifier = {
	.notifier_call = bond_netdev_event,
};

/*---------------------------- Hashing Policies -----------------------------*/

/* L2 hash helper */
static inline u32 bond_eth_hash(struct sk_buff *skb)
{
	struct ethhdr *ep, hdr_tmp;

	ep = skb_header_pointer(skb, 0, sizeof(hdr_tmp), &hdr_tmp);
	if (ep)
		return ep->h_dest[5] ^ ep->h_source[5] ^ ep->h_proto;
	return 0;
}
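
/* Note that this only mixes the last byte of each MAC address with the
 * ethertype, so it is cheap but coarse: flows are spread no finer than
 * layer 2 allows.
 */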

/* Extract the appropriate headers based on bond's xmit policy */
static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
			      struct flow_keys *fk)
{
	const struct ipv6hdr *iph6;
	const struct iphdr *iph;
	int noff, proto = -1;

	if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
		return skb_flow_dissect_flow_keys(skb, fk, 0);

	fk->ports.ports = 0;
	noff = skb_network_offset(skb);
	if (skb->protocol == htons(ETH_P_IP)) {
		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
			return false;
		iph = ip_hdr(skb);
		iph_to_flow_copy_v4addrs(fk, iph);
		noff += iph->ihl << 2;
		if (!ip_is_fragment(iph))
			proto = iph->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
			return false;
		iph6 = ipv6_hdr(skb);
		iph_to_flow_copy_v6addrs(fk, iph6);
		noff += sizeof(*iph6);
		proto = iph6->nexthdr;
	} else {
		return false;
	}
	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
		fk->ports.ports = skb_flow_get_ports(skb, noff, proto);

	return true;
}

3180 3181 3182 3183 3184 3185 3186
/**
 * bond_xmit_hash - generate a hash value based on the xmit policy
 * @bond: bonding device
 * @skb: buffer to use for headers
 *
 * This function will extract the necessary headers from the skb buffer and use
 * them to generate a hash based on the xmit_policy set in the bonding device
3187
 */
3188
u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
3189
{
3190 3191
	struct flow_keys flow;
	u32 hash;
3192

E
Eric Dumazet 已提交
3193 3194 3195 3196
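	/* an skb that already carries a valid L4 flow hash can be reused
	 * directly for the encap 3+4 policy
	 */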
	if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 &&
	    skb->l4_hash)
		return skb->hash;

	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
	    !bond_flow_dissect(bond, skb, &flow))
		return bond_eth_hash(skb);

	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
	    bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
		hash = bond_eth_hash(skb);
	else
		hash = (__force u32)flow.ports.ports;
	hash ^= (__force u32)flow_get_u32_dst(&flow) ^
		(__force u32)flow_get_u32_src(&flow);
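	/* fold the upper bytes down so a modulo by a small slave count
	 * still reflects all of the hashed fields
	 */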
	hash ^= (hash >> 16);
	hash ^= (hash >> 8);

	return hash;
}

/*-------------------------- Device entry points ----------------------------*/

static void bond_work_init_all(struct bonding *bond)
{
	INIT_DELAYED_WORK(&bond->mcast_work,
			  bond_resend_igmp_join_requests_delayed);
	INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
	INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
		INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
	else
		INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
	INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
	INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
}

static void bond_work_cancel_all(struct bonding *bond)
{
	cancel_delayed_work_sync(&bond->mii_work);
	cancel_delayed_work_sync(&bond->arp_work);
	cancel_delayed_work_sync(&bond->alb_work);
	cancel_delayed_work_sync(&bond->ad_work);
	cancel_delayed_work_sync(&bond->mcast_work);
	cancel_delayed_work_sync(&bond->slave_arr_work);
}

static int bond_open(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	/* reset slave->backup and slave->inactive */
	if (bond_has_slaves(bond)) {
		bond_for_each_slave(bond, slave, iter) {
			if (bond_uses_primary(bond) &&
			    slave != rcu_access_pointer(bond->curr_active_slave)) {
				bond_set_slave_inactive_flags(slave,
							      BOND_SLAVE_NOTIFY_NOW);
			} else if (BOND_MODE(bond) != BOND_MODE_8023AD) {
				bond_set_slave_active_flags(slave,
							    BOND_SLAVE_NOTIFY_NOW);
			}
		}
	}

	bond_work_init_all(bond);

	if (bond_is_lb(bond)) {
		/* bond_alb_initialize must be called before the timer
		 * is started.
		 */
		if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
			return -ENOMEM;
		if (bond->params.tlb_dynamic_lb)
			queue_delayed_work(bond->wq, &bond->alb_work, 0);
	}

	if (bond->params.miimon)  /* link check interval, in milliseconds. */
		queue_delayed_work(bond->wq, &bond->mii_work, 0);

	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
		queue_delayed_work(bond->wq, &bond->arp_work, 0);
		bond->recv_probe = bond_arp_rcv;
	}

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		queue_delayed_work(bond->wq, &bond->ad_work, 0);
		/* register to receive LACPDUs */
		bond->recv_probe = bond_3ad_lacpdu_recv;
		bond_3ad_initiate_agg_selection(bond, 1);
	}

	if (bond_mode_uses_xmit_hash(bond))
		bond_update_slave_arr(bond, NULL);

	return 0;
}

static int bond_close(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	bond_work_cancel_all(bond);
	bond->send_peer_notif = 0;
	if (bond_is_lb(bond))
		bond_alb_deinitialize(bond);
	bond->recv_probe = NULL;

	return 0;
}

static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
						struct rtnl_link_stats64 *stats)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct rtnl_link_stats64 temp;
	struct list_head *iter;
	struct slave *slave;

	memcpy(stats, &bond->bond_stats, sizeof(*stats));

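	/* accumulate only the deltas each slave gained since the last read,
	 * so the bond's totals stay monotonic even after a slave is released
	 */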
	bond_for_each_slave(bond, slave, iter) {
		const struct rtnl_link_stats64 *sstats =
			dev_get_stats(slave->dev, &temp);
		struct rtnl_link_stats64 *pstats = &slave->slave_stats;

		stats->rx_packets += sstats->rx_packets - pstats->rx_packets;
		stats->rx_bytes += sstats->rx_bytes - pstats->rx_bytes;
		stats->rx_errors += sstats->rx_errors - pstats->rx_errors;
		stats->rx_dropped += sstats->rx_dropped - pstats->rx_dropped;
		stats->rx_nohandler += sstats->rx_nohandler - pstats->rx_nohandler;

		stats->tx_packets += sstats->tx_packets - pstats->tx_packets;
		stats->tx_bytes += sstats->tx_bytes - pstats->tx_bytes;
		stats->tx_errors += sstats->tx_errors - pstats->tx_errors;
		stats->tx_dropped += sstats->tx_dropped - pstats->tx_dropped;

		stats->multicast += sstats->multicast - pstats->multicast;
		stats->collisions += sstats->collisions - pstats->collisions;

		stats->rx_length_errors += sstats->rx_length_errors - pstats->rx_length_errors;
		stats->rx_over_errors += sstats->rx_over_errors - pstats->rx_over_errors;
		stats->rx_crc_errors += sstats->rx_crc_errors - pstats->rx_crc_errors;
		stats->rx_frame_errors += sstats->rx_frame_errors - pstats->rx_frame_errors;
		stats->rx_fifo_errors += sstats->rx_fifo_errors - pstats->rx_fifo_errors;
		stats->rx_missed_errors += sstats->rx_missed_errors - pstats->rx_missed_errors;

		stats->tx_aborted_errors += sstats->tx_aborted_errors - pstats->tx_aborted_errors;
		stats->tx_carrier_errors += sstats->tx_carrier_errors - pstats->tx_carrier_errors;
		stats->tx_fifo_errors += sstats->tx_fifo_errors - pstats->tx_fifo_errors;
		stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors - pstats->tx_heartbeat_errors;
		stats->tx_window_errors += sstats->tx_window_errors - pstats->tx_window_errors;

		/* save off the slave stats for the next run */
		memcpy(pstats, sstats, sizeof(*sstats));
	}
	memcpy(&bond->bond_stats, stats, sizeof(*stats));

	return stats;
}

static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
{
3358
	struct bonding *bond = netdev_priv(bond_dev);
L
Linus Torvalds 已提交
3359 3360 3361 3362 3363 3364
	struct net_device *slave_dev = NULL;
	struct ifbond k_binfo;
	struct ifbond __user *u_binfo = NULL;
	struct ifslave k_sinfo;
	struct ifslave __user *u_sinfo = NULL;
	struct mii_ioctl_data *mii = NULL;
3365
	struct bond_opt_value newval;
3366
	struct net *net;
L
Linus Torvalds 已提交
3367 3368
	int res = 0;

3369
	netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);
L
Linus Torvalds 已提交
3370 3371 3372 3373

	switch (cmd) {
	case SIOCGMIIPHY:
		mii = if_mii(ifr);
S
Stephen Hemminger 已提交
3374
		if (!mii)
L
Linus Torvalds 已提交
3375
			return -EINVAL;
S
Stephen Hemminger 已提交
3376

L
Linus Torvalds 已提交
3377 3378 3379
		mii->phy_id = 0;
		/* Fall Through */
	case SIOCGMIIREG:
3380
		/* We do this again just in case we were called by SIOCGMIIREG
L
Linus Torvalds 已提交
3381 3382 3383
		 * instead of SIOCGMIIPHY.
		 */
		mii = if_mii(ifr);
S
Stephen Hemminger 已提交
3384
		if (!mii)
L
Linus Torvalds 已提交
3385
			return -EINVAL;
S
Stephen Hemminger 已提交
3386

L
Linus Torvalds 已提交
3387 3388
		if (mii->reg_num == 1) {
			mii->val_out = 0;
S
Stephen Hemminger 已提交
3389
			if (netif_carrier_ok(bond->dev))
L
Linus Torvalds 已提交
3390 3391 3392 3393 3394 3395 3396 3397
				mii->val_out = BMSR_LSTATUS;
		}

		return 0;
	case BOND_INFO_QUERY_OLD:
	case SIOCBONDINFOQUERY:
		u_binfo = (struct ifbond __user *)ifr->ifr_data;

S
Stephen Hemminger 已提交
3398
		if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
L
Linus Torvalds 已提交
3399 3400 3401
			return -EFAULT;

		res = bond_info_query(bond_dev, &k_binfo);
S
Stephen Hemminger 已提交
3402 3403 3404
		if (res == 0 &&
		    copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
			return -EFAULT;
L
Linus Torvalds 已提交
3405 3406 3407 3408 3409 3410

		return res;
	case BOND_SLAVE_INFO_QUERY_OLD:
	case SIOCBONDSLAVEINFOQUERY:
		u_sinfo = (struct ifslave __user *)ifr->ifr_data;

S
Stephen Hemminger 已提交
3411
		if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave)))
L
Linus Torvalds 已提交
3412 3413 3414
			return -EFAULT;

		res = bond_slave_info_query(bond_dev, &k_sinfo);
S
Stephen Hemminger 已提交
3415 3416 3417
		if (res == 0 &&
		    copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave)))
			return -EFAULT;
L
Linus Torvalds 已提交
3418 3419 3420 3421 3422 3423

		return res;
	default:
		break;
	}

3424 3425 3426
	net = dev_net(bond_dev);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
L
Linus Torvalds 已提交
3427 3428
		return -EPERM;

3429
	slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
L
Linus Torvalds 已提交
3430

3431
	netdev_dbg(bond_dev, "slave_dev=%p:\n", slave_dev);
L
Linus Torvalds 已提交
3432

S
Stephen Hemminger 已提交
3433
	if (!slave_dev)
3434
		return -ENODEV;
L
Linus Torvalds 已提交
3435

3436
	netdev_dbg(bond_dev, "slave_dev->name=%s:\n", slave_dev->name);
3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452
	switch (cmd) {
	case BOND_ENSLAVE_OLD:
	case SIOCBONDENSLAVE:
		res = bond_enslave(bond_dev, slave_dev);
		break;
	case BOND_RELEASE_OLD:
	case SIOCBONDRELEASE:
		res = bond_release(bond_dev, slave_dev);
		break;
	case BOND_SETHWADDR_OLD:
	case SIOCBONDSETHWADDR:
		bond_set_dev_addr(bond_dev, slave_dev);
		res = 0;
		break;
	case BOND_CHANGE_ACTIVE_OLD:
	case SIOCBONDCHANGEACTIVE:
3453 3454
		bond_opt_initstr(&newval, slave_dev->name);
		res = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval);
3455 3456 3457
		break;
	default:
		res = -EOPNOTSUPP;
L
Linus Torvalds 已提交
3458 3459 3460 3461 3462
	}

	return res;
}

3463
static void bond_change_rx_flags(struct net_device *bond_dev, int change)
L
Linus Torvalds 已提交
3464
{
3465
	struct bonding *bond = netdev_priv(bond_dev);
L
Linus Torvalds 已提交
3466

3467 3468 3469
	if (change & IFF_PROMISC)
		bond_set_promiscuity(bond,
				     bond_dev->flags & IFF_PROMISC ? 1 : -1);
S
Stephen Hemminger 已提交
3470

3471 3472 3473 3474
	if (change & IFF_ALLMULTI)
		bond_set_allmulti(bond,
				  bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
}
L
Linus Torvalds 已提交
3475

3476
static void bond_set_rx_mode(struct net_device *bond_dev)
3477 3478
{
	struct bonding *bond = netdev_priv(bond_dev);
3479
	struct list_head *iter;
3480
	struct slave *slave;
L
Linus Torvalds 已提交
3481

3482
	rcu_read_lock();
3483
	if (bond_uses_primary(bond)) {
3484
		slave = rcu_dereference(bond->curr_active_slave);
3485 3486 3487 3488 3489
		if (slave) {
			dev_uc_sync(slave->dev, bond_dev);
			dev_mc_sync(slave->dev, bond_dev);
		}
	} else {
3490
		bond_for_each_slave_rcu(bond, slave, iter) {
3491 3492 3493
			dev_uc_sync_multiple(slave->dev, bond_dev);
			dev_mc_sync_multiple(slave->dev, bond_dev);
		}
L
Linus Torvalds 已提交
3494
	}
3495
	rcu_read_unlock();
L
Linus Torvalds 已提交
3496 3497
}

static int bond_neigh_init(struct neighbour *n)
{
	struct bonding *bond = netdev_priv(n->dev);
	const struct net_device_ops *slave_ops;
	struct neigh_parms parms;
	struct slave *slave;
	int ret;

	slave = bond_first_slave(bond);
	if (!slave)
		return 0;
	slave_ops = slave->dev->netdev_ops;
	if (!slave_ops->ndo_neigh_setup)
		return 0;

	parms.neigh_setup = NULL;
	parms.neigh_cleanup = NULL;
	ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
	if (ret)
		return ret;

	/* Assign slave's neigh_cleanup to neighbour in case cleanup is called
	 * after the last slave has been detached.  Assumes that all slaves
	 * utilize the same neigh_cleanup (true at this writing as only user
	 * is ipoib).
	 */
	n->parms->neigh_cleanup = parms.neigh_cleanup;

	if (!parms.neigh_setup)
		return 0;

	return parms.neigh_setup(n);
}

/* The bonding ndo_neigh_setup is called at init time before any
 * slave exists. So we must declare a proxy setup function which will
 * be used at run time to resolve the actual slave neigh param setup.
 *
 * It's also called by master devices (such as vlans) to setup their
 * underlying devices. In that case - do nothing, we're already set up from
 * our init.
 */
static int bond_neigh_setup(struct net_device *dev,
			    struct neigh_parms *parms)
{
	/* modify only our neigh_parms */
	if (parms->dev == dev)
		parms->neigh_setup = bond_neigh_init;

	return 0;
}

3550
/* Change the MTU of all of a master's slaves to match the master */
L
Linus Torvalds 已提交
3551 3552
static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
{
3553
	struct bonding *bond = netdev_priv(bond_dev);
3554
	struct slave *slave, *rollback_slave;
3555
	struct list_head *iter;
L
Linus Torvalds 已提交
3556 3557
	int res = 0;

3558
	netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu);
L
Linus Torvalds 已提交
3559

3560
	bond_for_each_slave(bond, slave, iter) {
3561 3562
		netdev_dbg(bond_dev, "s %p c_m %p\n",
			   slave, slave->dev->netdev_ops->ndo_change_mtu);
3563

L
Linus Torvalds 已提交
3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574
		res = dev_set_mtu(slave->dev, new_mtu);

		if (res) {
			/* If we failed to set the slave's mtu to the new value
			 * we must abort the operation even in ACTIVE_BACKUP
			 * mode, because if we allow the backup slaves to have
			 * different mtu values than the active slave we'll
			 * need to change their mtu when doing a failover. That
			 * means changing their mtu from timer context, which
			 * is probably not a good idea.
			 */
3575 3576
			netdev_dbg(bond_dev, "err %d %s\n", res,
				   slave->dev->name);
L
Linus Torvalds 已提交
3577 3578 3579 3580 3581 3582 3583 3584 3585 3586
			goto unwind;
		}
	}

	bond_dev->mtu = new_mtu;

	return 0;

unwind:
	/* unwind from head to the slave that failed */
3587
	bond_for_each_slave(bond, rollback_slave, iter) {
L
Linus Torvalds 已提交
3588 3589
		int tmp_res;

3590 3591 3592 3593
		if (rollback_slave == slave)
			break;

		tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
L
Linus Torvalds 已提交
3594
		if (tmp_res) {
3595 3596
			netdev_dbg(bond_dev, "unwind err %d dev %s\n",
				   tmp_res, rollback_slave->dev->name);
L
Linus Torvalds 已提交
3597 3598 3599 3600 3601 3602
		}
	}

	return res;
}

3603
/* Change HW address
L
Linus Torvalds 已提交
3604 3605 3606 3607 3608 3609 3610
 *
 * Note that many devices must be down to change the HW address, and
 * downing the master releases all slaves.  We can make bonds full of
 * bonding devices to test this, however.
 */
static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
{
3611
	struct bonding *bond = netdev_priv(bond_dev);
3612
	struct slave *slave, *rollback_slave;
L
Linus Torvalds 已提交
3613
	struct sockaddr *sa = addr, tmp_sa;
3614
	struct list_head *iter;
L
Linus Torvalds 已提交
3615 3616
	int res = 0;

3617
	if (BOND_MODE(bond) == BOND_MODE_ALB)
3618 3619 3620
		return bond_alb_set_mac_address(bond_dev, addr);


3621
	netdev_dbg(bond_dev, "bond=%p\n", bond);
L
Linus Torvalds 已提交
3622

3623 3624
	/* If fail_over_mac is enabled, do nothing and return success.
	 * Returning an error causes ifenslave to fail.
3625
	 */
3626
	if (bond->params.fail_over_mac &&
3627
	    BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
3628
		return 0;
3629

S
Stephen Hemminger 已提交
3630
	if (!is_valid_ether_addr(sa->sa_data))
L
Linus Torvalds 已提交
3631 3632
		return -EADDRNOTAVAIL;

3633
	bond_for_each_slave(bond, slave, iter) {
3634
		netdev_dbg(bond_dev, "slave %p %s\n", slave, slave->dev->name);
L
Linus Torvalds 已提交
3635 3636 3637 3638 3639 3640 3641 3642
		res = dev_set_mac_address(slave->dev, addr);
		if (res) {
			/* TODO: consider downing the slave
			 * and retry ?
			 * User should expect communications
			 * breakage anyway until ARP finish
			 * updating, so...
			 */
3643
			netdev_dbg(bond_dev, "err %d %s\n", res, slave->dev->name);
L
Linus Torvalds 已提交
3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656
			goto unwind;
		}
	}

	/* success */
	memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len);
	return 0;

unwind:
	memcpy(tmp_sa.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
	tmp_sa.sa_family = bond_dev->type;

	/* unwind from head to the slave that failed */
3657
	bond_for_each_slave(bond, rollback_slave, iter) {
L
Linus Torvalds 已提交
3658 3659
		int tmp_res;

3660 3661 3662 3663
		if (rollback_slave == slave)
			break;

		tmp_res = dev_set_mac_address(rollback_slave->dev, &tmp_sa);
L
Linus Torvalds 已提交
3664
		if (tmp_res) {
3665 3666
			netdev_dbg(bond_dev, "unwind err %d dev %s\n",
				   tmp_res, rollback_slave->dev->name);
L
Linus Torvalds 已提交
3667 3668 3669 3670 3671 3672
		}
	}

	return res;
}

3673 3674 3675 3676 3677 3678 3679 3680 3681 3682
/**
 * bond_xmit_slave_id - transmit skb through slave with slave_id
 * @bond: bonding device that is transmitting
 * @skb: buffer to transmit
 * @slave_id: slave id up to slave_cnt-1 through which to transmit
 *
 * This function tries to transmit through slave with slave_id but in case
 * it fails, it tries to find the first available slave for transmission.
 * The skb is consumed in all cases, thus the function is void.
 */
3683
static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
3684
{
3685
	struct list_head *iter;
3686 3687 3688 3689
	struct slave *slave;
	int i = slave_id;

	/* Here we start from the slave with slave_id */
3690
	bond_for_each_slave_rcu(bond, slave, iter) {
3691
		if (--i < 0) {
3692
			if (bond_slave_can_tx(slave)) {
3693 3694 3695 3696 3697 3698 3699 3700
				bond_dev_queue_xmit(bond, skb, slave->dev);
				return;
			}
		}
	}

	/* Here we start from the first slave up to slave_id */
	i = slave_id;
3701
	bond_for_each_slave_rcu(bond, slave, iter) {
3702 3703
		if (--i < 0)
			break;
3704
		if (bond_slave_can_tx(slave)) {
3705 3706 3707 3708 3709
			bond_dev_queue_xmit(bond, skb, slave->dev);
			return;
		}
	}
	/* no slave that can tx has been found */
E
Eric Dumazet 已提交
3710
	bond_tx_drop(bond->dev, skb);
3711 3712
}

/**
 * bond_rr_gen_slave_id - generate slave id based on packets_per_slave
 * @bond: bonding device to use
 *
 * Based on the value of the bonding device's packets_per_slave parameter
 * this function generates a slave id, which is usually used as the next
 * slave to transmit through.
 */
static u32 bond_rr_gen_slave_id(struct bonding *bond)
{
	u32 slave_id;
	struct reciprocal_value reciprocal_packets_per_slave;
	int packets_per_slave = bond->params.packets_per_slave;
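	/* 0 picks a pseudo-random slave per packet, 1 is strict per-packet
	 * round robin, larger values stay on one slave for that many packets
	 * (reciprocal_divide() avoids a full division per packet)
	 */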

	switch (packets_per_slave) {
	case 0:
		slave_id = prandom_u32();
		break;
	case 1:
		slave_id = bond->rr_tx_counter;
		break;
	default:
		reciprocal_packets_per_slave =
			bond->params.reciprocal_packets_per_slave;
		slave_id = reciprocal_divide(bond->rr_tx_counter,
					     reciprocal_packets_per_slave);
		break;
	}
	bond->rr_tx_counter++;

	return slave_id;
}

static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct iphdr *iph = ip_hdr(skb);
	struct slave *slave;
	u32 slave_id;

	/* Start with the curr_active_slave that joined the bond as the
	 * default for sending IGMP traffic.  For failover purposes one
	 * needs to maintain some consistency for the interface that will
	 * send the join/membership reports.  The curr_active_slave found
	 * will send all of this type of traffic.
	 */
	if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
		slave = rcu_dereference(bond->curr_active_slave);
		if (slave)
			bond_dev_queue_xmit(bond, skb, slave->dev);
		else
			bond_xmit_slave_id(bond, skb, 0);
	} else {
		int slave_cnt = ACCESS_ONCE(bond->slave_cnt);

		if (likely(slave_cnt)) {
			slave_id = bond_rr_gen_slave_id(bond);
			bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
		} else {
			bond_tx_drop(bond_dev, skb);
		}
	}

	return NETDEV_TX_OK;
}

/* In active-backup mode, we know that bond->curr_active_slave is always valid if
 * the bond has a usable interface.
 */
static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave;

	slave = rcu_dereference(bond->curr_active_slave);
	if (slave)
		bond_dev_queue_xmit(bond, skb, slave->dev);
	else
		bond_tx_drop(bond_dev, skb);

	return NETDEV_TX_OK;
}

/* Use this to update slave_array when (a) it's not appropriate to update
 * slave_array right away (note that update_slave_array() may sleep)
 * and / or (b) RTNL is not held.
 */
void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay)
{
	queue_delayed_work(bond->wq, &bond->slave_arr_work, delay);
}

/* Slave array work handler. Holds only RTNL */
static void bond_slave_arr_handler(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    slave_arr_work.work);
	int ret;

	if (!rtnl_trylock())
		goto err;

	ret = bond_update_slave_arr(bond, NULL);
	rtnl_unlock();
	if (ret) {
		pr_warn_ratelimited("Failed to update slave array from WT\n");
		goto err;
	}
	return;

err:
	bond_slave_arr_work_rearm(bond, 1);
}

/* Build the usable slaves array in control path for modes that use xmit-hash
 * to determine the slave interface -
 * (a) BOND_MODE_8023AD
 * (b) BOND_MODE_XOR
 * (c) BOND_MODE_TLB && tlb_dynamic_lb == 0
 *
 * The caller is expected to hold RTNL only and NO other lock!
 */
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
{
	struct slave *slave;
	struct list_head *iter;
	struct bond_up_slave *new_arr, *old_arr;
	int agg_id = 0;
	int ret = 0;

#ifdef CONFIG_LOCKDEP
	WARN_ON(lockdep_is_held(&bond->mode_lock));
#endif

	new_arr = kzalloc(offsetof(struct bond_up_slave, arr[bond->slave_cnt]),
			  GFP_KERNEL);
	if (!new_arr) {
		ret = -ENOMEM;
		pr_err("Failed to build slave-array.\n");
		goto out;
	}
	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		struct ad_info ad_info;

		if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
			pr_debug("bond_3ad_get_active_agg_info failed\n");
			kfree_rcu(new_arr, rcu);
			/* No active aggregator means it's not safe to use
			 * the previous array.
			 */
			old_arr = rtnl_dereference(bond->slave_arr);
			if (old_arr) {
				RCU_INIT_POINTER(bond->slave_arr, NULL);
				kfree_rcu(old_arr, rcu);
			}
			goto out;
		}
		agg_id = ad_info.aggregator_id;
	}
	bond_for_each_slave(bond, slave, iter) {
		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			struct aggregator *agg;

			agg = SLAVE_AD_INFO(slave)->port.aggregator;
			if (!agg || agg->aggregator_identifier != agg_id)
				continue;
		}
		if (!bond_slave_can_tx(slave))
			continue;
		if (skipslave == slave)
			continue;
		new_arr->arr[new_arr->count++] = slave;
	}

	old_arr = rtnl_dereference(bond->slave_arr);
	rcu_assign_pointer(bond->slave_arr, new_arr);
	if (old_arr)
		kfree_rcu(old_arr, rcu);
out:
	if (ret != 0 && skipslave) {
		int idx;

		/* Rare situation where caller has asked to skip a specific
		 * slave but allocation failed (most likely!). BTW this is
		 * only possible when the call is initiated from
		 * __bond_release_one(). In this situation; overwrite the
		 * skipslave entry in the array with the last entry from the
		 * array to avoid a situation where the xmit path may choose
		 * this to-be-skipped slave to send a packet out.
		 */
		old_arr = rtnl_dereference(bond->slave_arr);
		for (idx = 0; idx < old_arr->count; idx++) {
			if (skipslave == old_arr->arr[idx]) {
				old_arr->arr[idx] =
				    old_arr->arr[old_arr->count-1];
				old_arr->count--;
				break;
			}
		}
	}
	return ret;
}

/* Use this Xmit function for 3AD as well as XOR modes. The current
 * usable slave array is formed in the control path. The xmit function
 * just calculates hash and sends the packet out.
 */
static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bonding *bond = netdev_priv(dev);
	struct slave *slave;
	struct bond_up_slave *slaves;
	unsigned int count;

	slaves = rcu_dereference(bond->slave_arr);
	count = slaves ? ACCESS_ONCE(slaves->count) : 0;
	if (likely(count)) {
		slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
		bond_dev_queue_xmit(bond, skb, slave->dev);
	} else {
		bond_tx_drop(dev, skb);
	}

	return NETDEV_TX_OK;
}

/* in broadcast mode, we send everything to all usable interfaces. */
static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave = NULL;
	struct list_head *iter;

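	/* clone the skb for every usable slave except the last one; the
	 * original is sent on the last usable slave, or dropped if none
	 */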
	bond_for_each_slave_rcu(bond, slave, iter) {
		if (bond_is_last_slave(bond, slave))
			break;
		if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			if (!skb2) {
				net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
						    bond_dev->name, __func__);
				continue;
			}
			bond_dev_queue_xmit(bond, skb2, slave->dev);
		}
	}
	if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
		bond_dev_queue_xmit(bond, skb, slave->dev);
	else
		bond_tx_drop(bond_dev, skb);

	return NETDEV_TX_OK;
}

/*------------------------- Device initialization ---------------------------*/

3970
/* Lookup the slave that corresponds to a qid */
3971 3972 3973 3974
static inline int bond_slave_override(struct bonding *bond,
				      struct sk_buff *skb)
{
	struct slave *slave = NULL;
3975
	struct list_head *iter;
3976

3977 3978
	if (!skb->queue_mapping)
		return 1;
3979 3980

	/* Find out if any slaves have the same mapping as this skb. */
3981 3982
	bond_for_each_slave_rcu(bond, slave, iter) {
		if (slave->queue_id == skb->queue_mapping) {
3983 3984
			if (bond_slave_is_up(slave) &&
			    slave->link == BOND_LINK_UP) {
3985 3986 3987 3988
				bond_dev_queue_xmit(bond, skb, slave->dev);
				return 0;
			}
			/* If the slave isn't UP, use default transmit policy. */
3989 3990 3991 3992
			break;
		}
	}

3993
	return 1;
3994 3995
}

3996

3997
static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
3998
			     void *accel_priv, select_queue_fallback_t fallback)
3999
{
4000
	/* This helper function exists to help dev_pick_tx get the correct
P
Phil Oester 已提交
4001
	 * destination queue.  Using a helper function skips a call to
4002 4003 4004
	 * skb_tx_hash and will put the skbs in the queue we expect on their
	 * way down to the bonding driver.
	 */
P
Phil Oester 已提交
4005 4006
	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

4007
	/* Save the original txq to restore before passing to the driver */
4008
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
4009

P
Phil Oester 已提交
4010
	if (unlikely(txq >= dev->real_num_tx_queues)) {
4011
		do {
P
Phil Oester 已提交
4012
			txq -= dev->real_num_tx_queues;
4013
		} while (txq >= dev->real_num_tx_queues);
P
Phil Oester 已提交
4014 4015
	}
	return txq;
4016 4017
}

4018
static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
4019
{
4020 4021
	struct bonding *bond = netdev_priv(dev);

4022 4023 4024
	if (bond_should_override_tx_queue(bond) &&
	    !bond_slave_override(bond, skb))
		return NETDEV_TX_OK;
4025

4026
	switch (BOND_MODE(bond)) {
4027 4028 4029 4030
	case BOND_MODE_ROUNDROBIN:
		return bond_xmit_roundrobin(skb, dev);
	case BOND_MODE_ACTIVEBACKUP:
		return bond_xmit_activebackup(skb, dev);
4031
	case BOND_MODE_8023AD:
4032
	case BOND_MODE_XOR:
4033
		return bond_3ad_xor_xmit(skb, dev);
4034 4035 4036 4037
	case BOND_MODE_BROADCAST:
		return bond_xmit_broadcast(skb, dev);
	case BOND_MODE_ALB:
		return bond_alb_xmit(skb, dev);
4038 4039
	case BOND_MODE_TLB:
		return bond_tlb_xmit(skb, dev);
4040 4041
	default:
		/* Should never happen, mode already checked */
4042
		netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
4043
		WARN_ON_ONCE(1);
E
Eric Dumazet 已提交
4044
		bond_tx_drop(dev, skb);
4045 4046 4047 4048
		return NETDEV_TX_OK;
	}
}

4049 4050 4051 4052 4053
static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bonding *bond = netdev_priv(dev);
	netdev_tx_t ret = NETDEV_TX_OK;

4054
	/* If we risk deadlock from transmitting this in the
4055 4056
	 * netpoll path, tell netpoll to queue the frame for later tx
	 */
4057
	if (unlikely(is_netpoll_tx_blocked(dev)))
4058 4059
		return NETDEV_TX_BUSY;

4060
	rcu_read_lock();
4061
	if (bond_has_slaves(bond))
4062 4063
		ret = __bond_start_xmit(skb, dev);
	else
E
Eric Dumazet 已提交
4064
		bond_tx_drop(dev, skb);
4065
	rcu_read_unlock();
4066 4067 4068

	return ret;
}
4069

4070 4071 4072 4073 4074
static int bond_ethtool_get_settings(struct net_device *bond_dev,
				     struct ethtool_cmd *ecmd)
{
	struct bonding *bond = netdev_priv(bond_dev);
	unsigned long speed = 0;
4075
	struct list_head *iter;
4076
	struct slave *slave;
4077 4078 4079 4080

	ecmd->duplex = DUPLEX_UNKNOWN;
	ecmd->port = PORT_OTHER;

4081
	/* Since bond_slave_can_tx returns false for all inactive or down slaves, we
4082 4083 4084 4085
	 * do not need to check mode.  Though link speed might not represent
	 * the true receive or transmit bandwidth (not all modes are symmetric)
	 * this is an accurate maximum.
	 */
4086
	bond_for_each_slave(bond, slave, iter) {
4087
		if (bond_slave_can_tx(slave)) {
4088 4089 4090 4091 4092 4093 4094 4095
			if (slave->speed != SPEED_UNKNOWN)
				speed += slave->speed;
			if (ecmd->duplex == DUPLEX_UNKNOWN &&
			    slave->duplex != DUPLEX_UNKNOWN)
				ecmd->duplex = slave->duplex;
		}
	}
	ethtool_cmd_speed_set(ecmd, speed ? : SPEED_UNKNOWN);
4096

4097 4098 4099
	return 0;
}

4100
static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
4101
				     struct ethtool_drvinfo *drvinfo)
4102
{
4103 4104 4105 4106
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
		 BOND_ABI_VERSION);
4107 4108
}

4109
static const struct ethtool_ops bond_ethtool_ops = {
4110
	.get_drvinfo		= bond_ethtool_get_drvinfo,
4111
	.get_settings		= bond_ethtool_get_settings,
4112
	.get_link		= ethtool_op_get_link,
4113 4114
};

4115
static const struct net_device_ops bond_netdev_ops = {
4116
	.ndo_init		= bond_init,
S
Stephen Hemminger 已提交
4117
	.ndo_uninit		= bond_uninit,
4118 4119
	.ndo_open		= bond_open,
	.ndo_stop		= bond_close,
4120
	.ndo_start_xmit		= bond_start_xmit,
4121
	.ndo_select_queue	= bond_select_queue,
4122
	.ndo_get_stats64	= bond_get_stats,
4123
	.ndo_do_ioctl		= bond_do_ioctl,
4124
	.ndo_change_rx_flags	= bond_change_rx_flags,
4125
	.ndo_set_rx_mode	= bond_set_rx_mode,
4126
	.ndo_change_mtu		= bond_change_mtu,
J
Jiri Pirko 已提交
4127
	.ndo_set_mac_address	= bond_set_mac_address,
4128
	.ndo_neigh_setup	= bond_neigh_setup,
J
Jiri Pirko 已提交
4129
	.ndo_vlan_rx_add_vid	= bond_vlan_rx_add_vid,
4130
	.ndo_vlan_rx_kill_vid	= bond_vlan_rx_kill_vid,
4131
#ifdef CONFIG_NET_POLL_CONTROLLER
4132
	.ndo_netpoll_setup	= bond_netpoll_setup,
4133 4134 4135
	.ndo_netpoll_cleanup	= bond_netpoll_cleanup,
	.ndo_poll_controller	= bond_poll_controller,
#endif
J
Jiri Pirko 已提交
4136 4137
	.ndo_add_slave		= bond_enslave,
	.ndo_del_slave		= bond_release,
4138
	.ndo_fix_features	= bond_fix_features,
4139
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
4140
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
4141
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
4142 4143 4144
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
4145
	.ndo_features_check	= passthru_features_check,
4146 4147
};

4148 4149 4150 4151
static const struct device_type bond_type = {
	.name = "bond",
};

4152 4153 4154 4155 4156 4157 4158 4159
static void bond_destructor(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	if (bond->wq)
		destroy_workqueue(bond->wq);
	free_netdev(bond_dev);
}

4160
void bond_setup(struct net_device *bond_dev)
L
Linus Torvalds 已提交
4161
{
4162
	struct bonding *bond = netdev_priv(bond_dev);
L
Linus Torvalds 已提交
4163

4164
	spin_lock_init(&bond->mode_lock);
4165
	bond->params = bonding_defaults;
L
Linus Torvalds 已提交
4166 4167 4168 4169 4170

	/* Initialize pointers */
	bond->dev = bond_dev;

	/* Initialize the device entry points */
4171
	ether_setup(bond_dev);
4172
	bond_dev->netdev_ops = &bond_netdev_ops;
4173
	bond_dev->ethtool_ops = &bond_ethtool_ops;
L
Linus Torvalds 已提交
4174

4175
	bond_dev->destructor = bond_destructor;
L
Linus Torvalds 已提交
4176

4177 4178
	SET_NETDEV_DEVTYPE(bond_dev, &bond_type);

L
Linus Torvalds 已提交
4179 4180
	/* Initialize the device options */
	bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
4181
	bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
4182
	bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
4183

4184
	/* don't acquire bond device's netif_tx_lock when transmitting */
L
Linus Torvalds 已提交
4185 4186 4187 4188 4189 4190 4191 4192 4193
	bond_dev->features |= NETIF_F_LLTX;

	/* By default, we declare the bond to be fully
	 * VLAN hardware accelerated capable. Special
	 * care is taken in the various xmit functions
	 * when there are slaves that are not hw accel
	 * capable
	 */

4194 4195 4196
	/* Don't allow bond devices to change network namespaces. */
	bond_dev->features |= NETIF_F_NETNS_LOCAL;

4197
	bond_dev->hw_features = BOND_VLAN_FEATURES |
4198 4199 4200
				NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX |
				NETIF_F_HW_VLAN_CTAG_FILTER;
4201

E
Eric Dumazet 已提交
4202
	bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
4203
	bond_dev->features |= bond_dev->hw_features;
L
Linus Torvalds 已提交
4204 4205
}

4206 4207 4208
/* Destroy a bonding device.
 * Must be under rtnl_lock when this function is called.
 */
4209
static void bond_uninit(struct net_device *bond_dev)
J
Jay Vosburgh 已提交
4210
{
4211
	struct bonding *bond = netdev_priv(bond_dev);
4212 4213
	struct list_head *iter;
	struct slave *slave;
4214
	struct bond_up_slave *arr;
J
Jay Vosburgh 已提交
4215

4216 4217
	bond_netpoll_cleanup(bond_dev);

4218
	/* Release the bonded slaves */
4219
	bond_for_each_slave(bond, slave, iter)
4220
		__bond_release_one(bond_dev, slave->dev, true);
4221
	netdev_info(bond_dev, "Released all slaves\n");
4222

4223 4224 4225 4226 4227 4228
	arr = rtnl_dereference(bond->slave_arr);
	if (arr) {
		RCU_INIT_POINTER(bond->slave_arr, NULL);
		kfree_rcu(arr, rcu);
	}

J
Jay Vosburgh 已提交
4229 4230
	list_del(&bond->bond_list);

4231
	bond_debug_unregister(bond);
J
Jay Vosburgh 已提交
4232 4233
}

L
Linus Torvalds 已提交
4234 4235 4236 4237
/*------------------------- Module initialization ---------------------------*/

static int bond_check_params(struct bond_params *params)
{
4238
	int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
4239 4240
	struct bond_opt_value newval;
	const struct bond_opt_value *valptr;
4241
	int arp_all_targets_value;
4242
	u16 ad_actor_sys_prio = 0;
4243
	u16 ad_user_port_key = 0;
4244

4245
	/* Convert string parameters. */
L
Linus Torvalds 已提交
4246
	if (mode) {
4247 4248 4249 4250
		bond_opt_initstr(&newval, mode);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
		if (!valptr) {
			pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
L
Linus Torvalds 已提交
4251 4252
			return -EINVAL;
		}
4253
		bond_mode = valptr->value;
L
Linus Torvalds 已提交
4254 4255
	}

4256 4257
	if (xmit_hash_policy) {
		if ((bond_mode != BOND_MODE_XOR) &&
4258 4259
		    (bond_mode != BOND_MODE_8023AD) &&
		    (bond_mode != BOND_MODE_TLB)) {
J
Joe Perches 已提交
4260
			pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
J
Joe Perches 已提交
4261
				bond_mode_name(bond_mode));
4262
		} else {
4263 4264 4265 4266
			bond_opt_initstr(&newval, xmit_hash_policy);
			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
						&newval);
			if (!valptr) {
J
Joe Perches 已提交
4267
				pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
4268 4269 4270
				       xmit_hash_policy);
				return -EINVAL;
			}
4271
			xmit_hashtype = valptr->value;
4272 4273 4274
		}
	}

L
Linus Torvalds 已提交
4275 4276
	if (lacp_rate) {
		if (bond_mode != BOND_MODE_8023AD) {
J
Joe Perches 已提交
4277 4278
			pr_info("lacp_rate param is irrelevant in mode %s\n",
				bond_mode_name(bond_mode));
L
Linus Torvalds 已提交
4279
		} else {
4280 4281 4282 4283
			bond_opt_initstr(&newval, lacp_rate);
			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
						&newval);
			if (!valptr) {
J
Joe Perches 已提交
4284
				pr_err("Error: Invalid lacp rate \"%s\"\n",
4285
				       lacp_rate);
L
Linus Torvalds 已提交
4286 4287
				return -EINVAL;
			}
4288
			lacp_fast = valptr->value;
L
Linus Torvalds 已提交
4289 4290 4291
		}
	}

4292
	if (ad_select) {
4293
		bond_opt_initstr(&newval, ad_select);
4294 4295 4296 4297
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
					&newval);
		if (!valptr) {
			pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
4298 4299
			return -EINVAL;
		}
4300 4301
		params->ad_select = valptr->value;
		if (bond_mode != BOND_MODE_8023AD)
4302
			pr_warn("ad_select param only affects 802.3ad mode\n");
4303 4304 4305 4306
	} else {
		params->ad_select = BOND_AD_STABLE;
	}

4307
	if (max_bonds < 0) {
4308 4309
		pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
			max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
L
Linus Torvalds 已提交
4310 4311 4312 4313
		max_bonds = BOND_DEFAULT_MAX_BONDS;
	}

	if (miimon < 0) {
4314 4315
		pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			miimon, INT_MAX);
4316
		miimon = 0;
L
Linus Torvalds 已提交
4317 4318 4319
	}

	if (updelay < 0) {
4320 4321
		pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			updelay, INT_MAX);
L
Linus Torvalds 已提交
4322 4323 4324 4325
		updelay = 0;
	}

	if (downdelay < 0) {
4326 4327
		pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			downdelay, INT_MAX);
L
Linus Torvalds 已提交
4328 4329 4330 4331
		downdelay = 0;
	}

	if ((use_carrier != 0) && (use_carrier != 1)) {
4332 4333
		pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
			use_carrier);
L
Linus Torvalds 已提交
4334 4335 4336
		use_carrier = 1;
	}

4337
	if (num_peer_notif < 0 || num_peer_notif > 255) {
4338 4339
		pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
			num_peer_notif);
4340 4341 4342
		num_peer_notif = 1;
	}

4343
	/* reset values for 802.3ad/TLB/ALB */
4344
	if (!bond_mode_uses_arp(bond_mode)) {
L
Linus Torvalds 已提交
4345
		if (!miimon) {
4346 4347
			pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
			pr_warn("Forcing miimon to 100msec\n");
4348
			miimon = BOND_DEFAULT_MIIMON;
L
Linus Torvalds 已提交
4349 4350 4351
		}
	}

4352
	if (tx_queues < 1 || tx_queues > 255) {
4353 4354
		pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
			tx_queues, BOND_DEFAULT_TX_QUEUES);
4355 4356 4357
		tx_queues = BOND_DEFAULT_TX_QUEUES;
	}

4358
	if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
4359 4360
		pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
			all_slaves_active);
4361 4362 4363
		all_slaves_active = 0;
	}

4364
	if (resend_igmp < 0 || resend_igmp > 255) {
4365 4366
		pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
			resend_igmp, BOND_DEFAULT_RESEND_IGMP);
4367 4368 4369
		resend_igmp = BOND_DEFAULT_RESEND_IGMP;
	}

4370 4371
	bond_opt_initval(&newval, packets_per_slave);
	if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
4372 4373 4374 4375 4376
		pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
			packets_per_slave, USHRT_MAX);
		packets_per_slave = 1;
	}

L
Linus Torvalds 已提交
4377
	if (bond_mode == BOND_MODE_ALB) {
J
Joe Perches 已提交
4378 4379
		pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
			  updelay);
L
Linus Torvalds 已提交
4380 4381 4382 4383 4384 4385 4386
	}

	if (!miimon) {
		if (updelay || downdelay) {
			/* just warn the user the up/down delay will have
			 * no effect since miimon is zero...
			 */
4387 4388
			pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
				updelay, downdelay);
L
Linus Torvalds 已提交
4389 4390 4391 4392
		}
	} else {
		/* don't allow arp monitoring */
		if (arp_interval) {
4393 4394
			pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
				miimon, arp_interval);
L
Linus Torvalds 已提交
4395 4396 4397 4398
			arp_interval = 0;
		}

		if ((updelay % miimon) != 0) {
4399 4400
			pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
				updelay, miimon, (updelay / miimon) * miimon);
L
Linus Torvalds 已提交
4401 4402 4403 4404 4405
		}

		updelay /= miimon;

		if ((downdelay % miimon) != 0) {
4406 4407 4408
			pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
				downdelay, miimon,
				(downdelay / miimon) * miimon);
L
Linus Torvalds 已提交
4409 4410 4411 4412 4413 4414
		}

		downdelay /= miimon;
	}

	if (arp_interval < 0) {
4415 4416
		pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			arp_interval, INT_MAX);
4417
		arp_interval = 0;
L
Linus Torvalds 已提交
4418 4419
	}

4420 4421
	for (arp_ip_count = 0, i = 0;
	     (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
4422
		__be32 ip;
4423 4424

		/* not a complete check, but good enough to catch mistakes */
4425
		if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
4426
		    !bond_is_ip_target_ok(ip)) {
4427 4428
			pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
				arp_ip_target[i]);
L
Linus Torvalds 已提交
4429 4430
			arp_interval = 0;
		} else {
4431 4432 4433
			if (bond_get_targets_ip(arp_target, ip) == -1)
				arp_target[arp_ip_count++] = ip;
			else
4434 4435
				pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
					&ip);
L
Linus Torvalds 已提交
4436 4437 4438 4439 4440
		}
	}

	if (arp_interval && !arp_ip_count) {
		/* don't allow arping if no arp_ip_target given... */
4441 4442
		pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
			arp_interval);
L
Linus Torvalds 已提交
4443 4444 4445
		arp_interval = 0;
	}

4446 4447
	if (arp_validate) {
		if (!arp_interval) {
J
Joe Perches 已提交
4448
			pr_err("arp_validate requires arp_interval\n");
4449 4450 4451
			return -EINVAL;
		}

4452 4453 4454 4455
		bond_opt_initstr(&newval, arp_validate);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
					&newval);
		if (!valptr) {
J
Joe Perches 已提交
4456
			pr_err("Error: invalid arp_validate \"%s\"\n",
4457
			       arp_validate);
4458 4459
			return -EINVAL;
		}
4460 4461
		arp_validate_value = valptr->value;
	} else {
4462
		arp_validate_value = 0;
4463
	}
4464

4465 4466
	arp_all_targets_value = 0;
	if (arp_all_targets) {
4467 4468 4469 4470
		bond_opt_initstr(&newval, arp_all_targets);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
					&newval);
		if (!valptr) {
4471 4472 4473
			pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
			       arp_all_targets);
			arp_all_targets_value = 0;
4474 4475
		} else {
			arp_all_targets_value = valptr->value;
4476 4477 4478
		}
	}

L
Linus Torvalds 已提交
4479
	if (miimon) {
J
Joe Perches 已提交
4480
		pr_info("MII link monitoring set to %d ms\n", miimon);
L
Linus Torvalds 已提交
4481
	} else if (arp_interval) {
4482 4483
		valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
					  arp_validate_value);
J
Joe Perches 已提交
4484
		pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
4485
			arp_interval, valptr->string, arp_ip_count);
L
Linus Torvalds 已提交
4486 4487

		for (i = 0; i < arp_ip_count; i++)
J
Joe Perches 已提交
4488
			pr_cont(" %s", arp_ip_target[i]);
L
Linus Torvalds 已提交
4489

J
Joe Perches 已提交
4490
		pr_cont("\n");
L
Linus Torvalds 已提交
4491

4492
	} else if (max_bonds) {
L
Linus Torvalds 已提交
4493 4494 4495
		/* miimon and arp_interval not set, we need one so things
		 * work as expected, see bonding.txt for details
		 */
J
Joe Perches 已提交
4496
		pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
L
Linus Torvalds 已提交
4497 4498
	}

4499
	if (primary && !bond_mode_uses_primary(bond_mode)) {
L
Linus Torvalds 已提交
4500 4501 4502
		/* currently, using a primary only makes sense
		 * in active backup, TLB or ALB modes
		 */
4503 4504
		pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
			primary, bond_mode_name(bond_mode));
L
Linus Torvalds 已提交
4505 4506 4507
		primary = NULL;
	}

4508
	if (primary && primary_reselect) {
4509 4510 4511 4512
		bond_opt_initstr(&newval, primary_reselect);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
					&newval);
		if (!valptr) {
J
Joe Perches 已提交
4513
			pr_err("Error: Invalid primary_reselect \"%s\"\n",
4514
			       primary_reselect);
4515 4516
			return -EINVAL;
		}
4517
		primary_reselect_value = valptr->value;
4518 4519 4520 4521
	} else {
		primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
	}

4522
	if (fail_over_mac) {
4523 4524 4525 4526
		bond_opt_initstr(&newval, fail_over_mac);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
					&newval);
		if (!valptr) {
J
Joe Perches 已提交
4527
			pr_err("Error: invalid fail_over_mac \"%s\"\n",
4528
			       fail_over_mac);
4529 4530
			return -EINVAL;
		}
4531
		fail_over_mac_value = valptr->value;
4532
		if (bond_mode != BOND_MODE_ACTIVEBACKUP)
4533
			pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
4534 4535 4536
	} else {
		fail_over_mac_value = BOND_FOM_NONE;
	}
4537

4538 4539 4540 4541 4542 4543 4544 4545 4546 4547
	bond_opt_initstr(&newval, "default");
	valptr = bond_opt_parse(
			bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
				     &newval);
	if (!valptr) {
		pr_err("Error: No ad_actor_sys_prio default value");
		return -EINVAL;
	}
	ad_actor_sys_prio = valptr->value;

4548 4549 4550 4551 4552 4553 4554 4555
	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
				&newval);
	if (!valptr) {
		pr_err("Error: No ad_user_port_key default value");
		return -EINVAL;
	}
	ad_user_port_key = valptr->value;

4556
	if (lp_interval == 0) {
4557 4558
		pr_warn("Warning: lp_interval must be between 1 and %d, so it was reset to %d\n",
			INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
4559 4560 4561
		lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
	}

L
Linus Torvalds 已提交
4562 4563
	/* fill params struct with the proper values */
	params->mode = bond_mode;
4564
	params->xmit_policy = xmit_hashtype;
L
Linus Torvalds 已提交
4565
	params->miimon = miimon;
4566
	params->num_peer_notif = num_peer_notif;
L
Linus Torvalds 已提交
4567
	params->arp_interval = arp_interval;
4568
	params->arp_validate = arp_validate_value;
4569
	params->arp_all_targets = arp_all_targets_value;
L
Linus Torvalds 已提交
4570 4571 4572 4573 4574
	params->updelay = updelay;
	params->downdelay = downdelay;
	params->use_carrier = use_carrier;
	params->lacp_fast = lacp_fast;
	params->primary[0] = 0;
4575
	params->primary_reselect = primary_reselect_value;
4576
	params->fail_over_mac = fail_over_mac_value;
4577
	params->tx_queues = tx_queues;
4578
	params->all_slaves_active = all_slaves_active;
4579
	params->resend_igmp = resend_igmp;
4580
	params->min_links = min_links;
4581
	params->lp_interval = lp_interval;
4582
	params->packets_per_slave = packets_per_slave;
4583
	params->tlb_dynamic_lb = 1; /* Default value */
4584
	params->ad_actor_sys_prio = ad_actor_sys_prio;
4585
	eth_zero_addr(params->ad_actor_system);
4586
	params->ad_user_port_key = ad_user_port_key;
4587 4588 4589 4590 4591 4592 4593 4594 4595 4596 4597
	if (packets_per_slave > 0) {
		params->reciprocal_packets_per_slave =
			reciprocal_value(packets_per_slave);
	} else {
		/* reciprocal_packets_per_slave is unused if
		 * packets_per_slave is 0 or 1, just initialize it
		 */
		params->reciprocal_packets_per_slave =
			(struct reciprocal_value) { 0 };
	}

L
Linus Torvalds 已提交
4598 4599 4600 4601 4602 4603 4604 4605 4606 4607
	if (primary) {
		strncpy(params->primary, primary, IFNAMSIZ);
		params->primary[IFNAMSIZ - 1] = 0;
	}

	memcpy(params->arp_targets, arp_target, sizeof(arp_target));

	return 0;
}

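/* Bond devices transmit through their slaves while holding their own
 * locks, so give the bond's locks their own lockdep classes to avoid
 * false positive lockdep reports when devices are stacked.
 */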
static struct lock_class_key bonding_netdev_xmit_lock_key;
static struct lock_class_key bonding_netdev_addr_lock_key;
static struct lock_class_key bonding_tx_busylock_key;

static void bond_set_lockdep_class_one(struct net_device *dev,
				       struct netdev_queue *txq,
				       void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &bonding_netdev_xmit_lock_key);
}

static void bond_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock,
			  &bonding_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, bond_set_lockdep_class_one, NULL);
	dev->qdisc_tx_busylock = &bonding_tx_busylock_key;
}

4628
/* Called from registration process */
4629 4630 4631
static int bond_init(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
4632
	struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
4633

4634
	netdev_dbg(bond_dev, "Begin bond_init\n");
4635 4636 4637 4638 4639 4640 4641

	bond->wq = create_singlethread_workqueue(bond_dev->name);
	if (!bond->wq)
		return -ENOMEM;

	bond_set_lockdep_class(bond_dev);

4642
	list_add_tail(&bond->bond_list, &bn->dev_list);
4643

4644
	bond_prepare_sysfs_group(bond);
4645

4646 4647
	bond_debug_register(bond);

4648 4649
	/* Ensure valid dev_addr */
	if (is_zero_ether_addr(bond_dev->dev_addr) &&
4650
	    bond_dev->addr_assign_type == NET_ADDR_PERM)
4651 4652
		eth_hw_addr_random(bond_dev);

4653 4654 4655
	return 0;
}

4656
unsigned int bond_get_num_tx_queues(void)
4657
{
4658
	return tx_queues;
4659 4660
}

4661
/* Create a new bond based on the specified name and bonding parameters.
4662
 * If name is NULL, obtain a suitable "bond%d" name for us.
4663 4664 4665
 * Caller must NOT hold rtnl_lock; we need to release it here before we
 * set up our sysfs entries.
 */
4666
int bond_create(struct net *net, const char *name)
4667 4668
{
	struct net_device *bond_dev;
4669 4670
	struct bonding *bond;
	struct alb_bond_info *bond_info;
4671 4672 4673
	int res;

	rtnl_lock();
4674

4675
	bond_dev = alloc_netdev_mq(sizeof(struct bonding),
4676
				   name ? name : "bond%d", NET_NAME_UNKNOWN,
4677
				   bond_setup, tx_queues);
4678
	if (!bond_dev) {
J
Joe Perches 已提交
4679
		pr_err("%s: eek! can't alloc netdev!\n", name);
4680 4681
		rtnl_unlock();
		return -ENOMEM;
4682 4683
	}

4684 4685 4686 4687 4688 4689 4690 4691
	/*
	 * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX.
	 * It is set to 0 by default which is wrong.
	 */
	bond = netdev_priv(bond_dev);
	bond_info = &(BOND_ALB_INFO(bond));
	bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;

4692
	dev_net_set(bond_dev, net);
	bond_dev->rtnl_link_ops = &bond_link_ops;

	res = register_netdevice(bond_dev);

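	/* carrier stays off until a slave reports link up; bond_set_carrier()
	 * raises it once the link monitor sees an active slave
	 */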
	netif_carrier_off(bond_dev);

	rtnl_unlock();
	if (res < 0)
		bond_destructor(bond_dev);
	return res;
}

4705
static int __net_init bond_net_init(struct net *net)
4706
{
4707
	struct bond_net *bn = net_generic(net, bond_net_id);
4708 4709 4710 4711 4712

	bn->net = net;
	INIT_LIST_HEAD(&bn->dev_list);

	bond_create_proc_dir(bn);
4713
	bond_create_sysfs(bn);
4714

4715
	return 0;
4716 4717
}

4718
static void __net_exit bond_net_exit(struct net *net)
4719
{
4720
	struct bond_net *bn = net_generic(net, bond_net_id);
4721 4722
	struct bonding *bond, *tmp_bond;
	LIST_HEAD(list);
4723

4724
	bond_destroy_sysfs(bn);
4725 4726 4727 4728 4729 4730 4731

	/* Kill off any bonds created after unregistering bond rtnl ops */
	rtnl_lock();
	list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
		unregister_netdevice_queue(bond->dev, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
4732 4733

	bond_destroy_proc_dir(bn);
4734 4735 4736 4737 4738
}

static struct pernet_operations bond_net_ops = {
	.init = bond_net_init,
	.exit = bond_net_exit,
4739 4740
	.id   = &bond_net_id,
	.size = sizeof(struct bond_net),
4741 4742
};

L
Linus Torvalds 已提交
4743 4744 4745 4746 4747
static int __init bonding_init(void)
{
	int i;
	int res;

4748
	pr_info("%s", bond_version);
L
Linus Torvalds 已提交
4749

4750
	res = bond_check_params(&bonding_defaults);
S
Stephen Hemminger 已提交
4751
	if (res)
4752
		goto out;
L
Linus Torvalds 已提交
4753

4754
	res = register_pernet_subsys(&bond_net_ops);
4755 4756
	if (res)
		goto out;
4757

4758
	res = bond_netlink_init();
4759
	if (res)
4760
		goto err_link;
4761

4762 4763
	bond_create_debugfs();

L
Linus Torvalds 已提交
4764
	for (i = 0; i < max_bonds; i++) {
4765
		res = bond_create(&init_net, NULL);
4766 4767
		if (res)
			goto err;
L
Linus Torvalds 已提交
4768 4769 4770
	}

	register_netdevice_notifier(&bond_netdev_notifier);
4771
out:
L
Linus Torvalds 已提交
4772
	return res;
4773
err:
4774
	bond_destroy_debugfs();
4775
	bond_netlink_fini();
4776
err_link:
4777
	unregister_pernet_subsys(&bond_net_ops);
4778
	goto out;
4779

L
Linus Torvalds 已提交
4780 4781 4782 4783 4784 4785
}

static void __exit bonding_exit(void)
{
	unregister_netdevice_notifier(&bond_netdev_notifier);

4786
	bond_destroy_debugfs();
4787

4788
	bond_netlink_fini();
4789
	unregister_pernet_subsys(&bond_net_ops);
4790 4791

#ifdef CONFIG_NET_POLL_CONTROLLER
4792
	/* Make sure we don't have an imbalance on our netpoll blocking */
4793
	WARN_ON(atomic_read(&netpoll_block_tx));
4794
#endif
L
Linus Torvalds 已提交
4795 4796 4797 4798 4799 4800 4801 4802
}

module_init(bonding_init);
module_exit(bonding_exit);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");