bond_main.c 155.7 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41
/*
 * originally based on the dummy device.
 *
 * Copyright 1999, Thomas Davis, tadavis@lbl.gov.
 * Licensed under the GPL. Based on dummy.c, and eql.c devices.
 *
 * bonding.c: an Ethernet Bonding driver
 *
 * This is useful to talk to a Cisco EtherChannel compatible equipment:
 *	Cisco 5500
 *	Sun Trunking (Solaris)
 *	Alteon AceDirector Trunks
 *	Linux Bonding
 *	and probably many L2 switches ...
 *
 * How it works:
 *    ifconfig bond0 ipaddress netmask up
 *      will setup a network device, with an ip address.  No mac address
 *	will be assigned at this time.  The hw mac address will come from
 *	the first slave bonded to the channel.  All slaves will then use
 *	this hw mac address.
 *
 *    ifconfig bond0 down
 *         will release all slaves, marking them as down.
 *
 *    ifenslave bond0 eth0
 *	will attach eth0 to bond0 as a slave.  eth0 hw mac address will either
 *	a: be used as initial mac address
 *	b: if a hw mac address already is there, eth0's hw mac address
 *	   will then be set from bond0.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
42
#include <net/ip.h>
L
Linus Torvalds 已提交
43
#include <linux/ip.h>
M
Matteo Croce 已提交
44 45
#include <linux/icmp.h>
#include <linux/icmpv6.h>
46 47
#include <linux/tcp.h>
#include <linux/udp.h>
L
Linus Torvalds 已提交
48 49 50 51 52 53 54 55
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/socket.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/bitops.h>
S
Stephen Hemminger 已提交
56
#include <linux/io.h>
L
Linus Torvalds 已提交
57
#include <asm/dma.h>
S
Stephen Hemminger 已提交
58
#include <linux/uaccess.h>
L
Linus Torvalds 已提交
59 60 61
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
62
#include <linux/igmp.h>
L
Linus Torvalds 已提交
63 64 65 66 67 68 69 70 71 72 73
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/smp.h>
#include <linux/if_ether.h>
#include <net/arp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_bonding.h>
D
David Sterba 已提交
74
#include <linux/jiffies.h>
75
#include <linux/preempt.h>
J
Jay Vosburgh 已提交
76
#include <net/route.h>
77
#include <net/net_namespace.h>
78
#include <net/netns/generic.h>
79
#include <net/pkt_sched.h>
80
#include <linux/rculist.h>
81
#include <net/flow_dissector.h>
82
#include <net/xfrm.h>
83 84 85
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
86 87 88
#if IS_ENABLED(CONFIG_TLS_DEVICE)
#include <net/tls.h>
#endif
L
Linus Torvalds 已提交
89

90 91
#include "bonding_priv.h"

L
Linus Torvalds 已提交
92 93 94 95 96
/*---------------------------- Module parameters ----------------------------*/

/* monitor all links that often (in milliseconds). <=0 disables monitoring */

static int max_bonds	= BOND_DEFAULT_MAX_BONDS;
97
static int tx_queues	= BOND_DEFAULT_TX_QUEUES;
98
static int num_peer_notif = 1;
99
static int miimon;
S
Stephen Hemminger 已提交
100 101
static int updelay;
static int downdelay;
L
Linus Torvalds 已提交
102
static int use_carrier	= 1;
S
Stephen Hemminger 已提交
103 104
static char *mode;
static char *primary;
105
static char *primary_reselect;
S
Stephen Hemminger 已提交
106
static char *lacp_rate;
107
static int min_links;
S
Stephen Hemminger 已提交
108 109
static char *ad_select;
static char *xmit_hash_policy;
110
static int arp_interval;
S
Stephen Hemminger 已提交
111 112
static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
static char *arp_validate;
113
static char *arp_all_targets;
S
Stephen Hemminger 已提交
114
static char *fail_over_mac;
115
static int all_slaves_active;
116
static struct bond_params bonding_defaults;
117
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
118
static int packets_per_slave = 1;
119
static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
L
Linus Torvalds 已提交
120 121 122

module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
123 124
module_param(tx_queues, int, 0);
MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
125
module_param_named(num_grat_arp, num_peer_notif, int, 0644);
126 127
MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
			       "failover event (alias of num_unsol_na)");
128
module_param_named(num_unsol_na, num_peer_notif, int, 0644);
129 130
MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
			       "failover event (alias of num_grat_arp)");
L
Linus Torvalds 已提交
131 132 133 134 135
module_param(miimon, int, 0);
MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
module_param(updelay, int, 0);
MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
module_param(downdelay, int, 0);
136 137
MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
			    "in milliseconds");
L
Linus Torvalds 已提交
138
module_param(use_carrier, int, 0);
139
MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
140
			      "0 for off, 1 for on (default)");
L
Linus Torvalds 已提交
141
module_param(mode, charp, 0);
142
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
143 144 145
		       "1 for active-backup, 2 for balance-xor, "
		       "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
		       "6 for balance-alb");
L
Linus Torvalds 已提交
146 147
module_param(primary, charp, 0);
MODULE_PARM_DESC(primary, "Primary network device to use");
148 149 150 151 152 153 154 155
module_param(primary_reselect, charp, 0);
MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
				   "once it comes up; "
				   "0 for always (default), "
				   "1 for only if speed of primary is "
				   "better, "
				   "2 for only on active slave "
				   "failure");
L
Linus Torvalds 已提交
156
module_param(lacp_rate, charp, 0);
157 158
MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
			    "0 for slow, 1 for fast");
159
module_param(ad_select, charp, 0);
Z
Zhu Yanjun 已提交
160
MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
161 162
			    "0 for stable (default), 1 for bandwidth, "
			    "2 for count");
163 164 165
module_param(min_links, int, 0);
MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");

166
module_param(xmit_hash_policy, charp, 0);
167
MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
168
				   "0 for layer 2 (default), 1 for layer 3+4, "
169
				   "2 for layer 2+3, 3 for encap layer 2+3, "
170
				   "4 for encap layer 3+4, 5 for vlan+srcmac");
L
Linus Torvalds 已提交
171 172 173 174
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
175
module_param(arp_validate, charp, 0);
176 177 178
MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
			       "0 for none (default), 1 for active, "
			       "2 for backup, 3 for all");
179 180
module_param(arp_all_targets, charp, 0);
MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
181
module_param(fail_over_mac, charp, 0);
182 183 184
MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
				"the same MAC; 0 for none (default), "
				"1 for active, 2 for follow");
185
module_param(all_slaves_active, int, 0);
186
MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
187
				     "by setting active flag for all slaves; "
188
				     "0 for never (default), 1 for always.");
189
module_param(resend_igmp, int, 0);
190 191
MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
			      "link failure");
192 193 194 195
module_param(packets_per_slave, int, 0);
MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
				    "mode; 0 for a random slave, 1 packet per "
				    "slave (default), >1 packets per slave.");
196 197 198 199
module_param(lp_interval, uint, 0);
MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
			      "the bonding driver sends learning packets to "
			      "each slaves peer switch. The default is 1.");
L
Linus Torvalds 已提交
200 201 202

/*----------------------------- Global variables ----------------------------*/

203
#ifdef CONFIG_NET_POLL_CONTROLLER
204
atomic_t netpoll_block_tx = ATOMIC_INIT(0);
205 206
#endif

207
unsigned int bond_net_id __read_mostly;
L
Linus Torvalds 已提交
208

209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253
/* Flow dissector key descriptors: each entry names a dissector key and the
 * slot in struct flow_keys it is extracted into.  The set covers L2 (vlan),
 * L3 (IPv4/IPv6/TIPC addresses, flow label), L4 (ports, ICMP) and tunnel
 * (GRE keyid) fields.
 */
static const struct flow_dissector_key flow_keys_bonding_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC,
		.offset = offsetof(struct flow_keys, addrs.tipckey),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_ICMP,
		.offset = offsetof(struct flow_keys, icmp),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLAN,
		.offset = offsetof(struct flow_keys, vlan),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};

/* Dissector built from the key list above; presumably initialized at module
 * init and used by the bonding xmit hash — initialization site not in view.
 */
static struct flow_dissector flow_keys_bonding __read_mostly;

L
Linus Torvalds 已提交
254 255
/*-------------------------- Forward declarations ---------------------------*/

256
static int bond_init(struct net_device *bond_dev);
257
static void bond_uninit(struct net_device *bond_dev);
258 259
static void bond_get_stats(struct net_device *bond_dev,
			   struct rtnl_link_stats64 *stats);
260
static void bond_slave_arr_handler(struct work_struct *work);
261 262
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
				  int mod);
263
static void bond_netdev_notify_work(struct work_struct *work);
L
Linus Torvalds 已提交
264 265 266

/*---------------------------- General routines -----------------------------*/

267
const char *bond_mode_name(int mode)
L
Linus Torvalds 已提交
268
{
269 270 271 272 273
	static const char *names[] = {
		[BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
		[BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
		[BOND_MODE_XOR] = "load balancing (xor)",
		[BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
S
Stephen Hemminger 已提交
274
		[BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
275 276 277 278
		[BOND_MODE_TLB] = "transmit load balancing",
		[BOND_MODE_ALB] = "adaptive load balancing",
	};

279
	if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
L
Linus Torvalds 已提交
280
		return "unknown";
281 282

	return names[mode];
L
Linus Torvalds 已提交
283 284 285 286
}

/**
 * bond_dev_queue_xmit - Prepare skb for xmit.
S
Stephen Hemminger 已提交
287
 *
L
Linus Torvalds 已提交
288 289 290 291
 * @bond: bond device that got this skb for tx.
 * @skb: hw accel VLAN tagged skb to transmit
 * @slave_dev: slave that is supposed to xmit this skbuff
 */
292
netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
S
Stephen Hemminger 已提交
293
			struct net_device *slave_dev)
L
Linus Torvalds 已提交
294
{
295
	skb->dev = slave_dev;
296

297
	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
298
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
299
	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
300

301
	if (unlikely(netpoll_tx_running(bond->dev)))
302 303 304
		return bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);

	return dev_queue_xmit(skb);
L
Linus Torvalds 已提交
305 306
}

307 308 309 310 311 312 313 314 315 316 317 318 319
/* Report whether socket-based (e.g. per-socket tx hashing) handling applies:
 * only 802.3ad and balance-xor modes with the layer3+4 xmit policy qualify.
 */
bool bond_sk_check(struct bonding *bond)
{
	int mode = BOND_MODE(bond);

	if (mode != BOND_MODE_8023AD && mode != BOND_MODE_XOR)
		return false;

	return bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34;
}

320 321
/*---------------------------------- VLAN -----------------------------------*/

322
/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
J
Jiri Pirko 已提交
323
 * We don't protect the slave list iteration with a lock because:
L
Linus Torvalds 已提交
324 325 326 327
 * a. This operation is performed in IOCTL context,
 * b. The operation is protected by the RTNL semaphore in the 8021q code,
 * c. Holding a lock with BH disabled while directly calling a base driver
 *    entry point is generally a BAD idea.
S
Stephen Hemminger 已提交
328
 *
L
Linus Torvalds 已提交
329 330 331 332 333 334 335 336 337 338 339 340
 * The design of synchronization/protection for this operation in the 8021q
 * module is good for one or more VLAN devices over a single physical device
 * and cannot be extended for a teaming solution like bonding, so there is a
 * potential race condition here where a net device from the vlan group might
 * be referenced (either by a base driver or the 8021q code) while it is being
 * removed from the system. However, it turns out we're not making matters
 * worse, and if it works for regular VLAN usage it will work here too.
*/

/**
 * bond_vlan_rx_add_vid - Propagates adding an id to slaves
 * @bond_dev: bonding net device that got called
 * @proto: network protocol ID
 * @vid: vlan id being added
 *
 * Adds the vid to every slave's VLAN filter.  If any slave fails, the
 * vid is removed again from the slaves already updated and the error is
 * returned, so either all slaves or none carry the vid.
 */
static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
				__be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *rollback_slave;
	struct list_head *iter;
	int res;

	bond_for_each_slave(bond, slave, iter) {
		res = vlan_vid_add(slave->dev, proto, vid);
		if (res)
			goto unwind;
	}

	return 0;

unwind:
	/* unwind to the slave that failed; 'slave' still points at it, so
	 * stop the rollback walk as soon as it is reached.
	 */
	bond_for_each_slave(bond, rollback_slave, iter) {
		if (rollback_slave == slave)
			break;

		vlan_vid_del(rollback_slave->dev, proto, vid);
	}

	return res;
}

/**
 * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
 * @bond_dev: bonding net device that got called
375
 * @proto: network protocol ID
L
Linus Torvalds 已提交
376 377
 * @vid: vlan id being removed
 */
378 379
static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
				 __be16 proto, u16 vid)
L
Linus Torvalds 已提交
380
{
381
	struct bonding *bond = netdev_priv(bond_dev);
382
	struct list_head *iter;
L
Linus Torvalds 已提交
383 384
	struct slave *slave;

385
	bond_for_each_slave(bond, slave, iter)
386
		vlan_vid_del(slave->dev, proto, vid);
L
Linus Torvalds 已提交
387

388 389
	if (bond_is_lb(bond))
		bond_alb_clear_vlan(bond, vid);
390 391

	return 0;
L
Linus Torvalds 已提交
392 393
}

394 395 396 397 398 399 400 401 402 403
/*---------------------------------- XFRM -----------------------------------*/

#ifdef CONFIG_XFRM_OFFLOAD
/**
 * bond_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 *
 * Delegates the SA to the current active slave's xfrm offload hooks and,
 * on success, records the SA on bond->ipsec_list so it can be managed
 * across slaves later.  Returns 0 or a negative errno.
 */
static int bond_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct bond_ipsec *ipsec;
	struct bonding *bond;
	struct slave *slave;
	int err;

	if (!bond_dev)
		return -EINVAL;

	rcu_read_lock();
	bond = netdev_priv(bond_dev);
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* The active slave must itself implement xdo_dev_state_add and
	 * must not be a nested bond master.
	 */
	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
	    netif_is_bond_master(slave->dev)) {
		slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n");
		rcu_read_unlock();
		return -EINVAL;
	}

	/* GFP_ATOMIC: we are inside an RCU read-side critical section. */
	ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
	if (!ipsec) {
		rcu_read_unlock();
		return -ENOMEM;
	}
	xs->xso.real_dev = slave->dev;

	err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
	if (!err) {
		/* Track the SA under ipsec_lock so the list can be walked
		 * when SAs need to be moved to another slave.
		 */
		ipsec->xs = xs;
		INIT_LIST_HEAD(&ipsec->list);
		spin_lock_bh(&bond->ipsec_lock);
		list_add(&ipsec->list, &bond->ipsec_list);
		spin_unlock_bh(&bond->ipsec_lock);
	} else {
		kfree(ipsec);
	}
	rcu_read_unlock();
	return err;
}

449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484
/* Program every SA tracked on bond->ipsec_list onto the current active
 * slave's hardware.  An SA whose hardware add fails gets real_dev reset
 * to NULL so it is treated as not offloaded.
 */
static void bond_ipsec_add_sa_all(struct bonding *bond)
{
	struct net_device *bond_dev = bond->dev;
	struct bond_ipsec *ipsec;
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave)
		goto out;

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
	    netif_is_bond_master(slave->dev)) {
		/* Only warn when there actually are SAs that cannot be
		 * offloaded to this slave.
		 */
		spin_lock_bh(&bond->ipsec_lock);
		if (!list_empty(&bond->ipsec_list))
			slave_warn(bond_dev, slave->dev,
				   "%s: no slave xdo_dev_state_add\n",
				   __func__);
		spin_unlock_bh(&bond->ipsec_lock);
		goto out;
	}

	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		ipsec->xs->xso.real_dev = slave->dev;
		if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs)) {
			slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
			ipsec->xs->xso.real_dev = NULL;
		}
	}
	spin_unlock_bh(&bond->ipsec_lock);
out:
	rcu_read_unlock();
}

485 486 487 488 489 490 491
/**
 * bond_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 *
 * Removes the SA from the active slave's hardware when possible, and
 * always removes it from the bond's ipsec tracking list.
 **/
static void bond_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct bond_ipsec *ipsec;
	struct bonding *bond;
	struct slave *slave;

	if (!bond_dev)
		return;

	rcu_read_lock();
	bond = netdev_priv(bond_dev);
	slave = rcu_dereference(bond->curr_active_slave);

	if (!slave)
		goto out;

	/* A cleared real_dev means the SA is not currently offloaded to
	 * any slave, so there is no hardware state to delete.
	 */
	if (!xs->xso.real_dev)
		goto out;

	WARN_ON(xs->xso.real_dev != slave->dev);

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
	    netif_is_bond_master(slave->dev)) {
		slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
		goto out;
	}

	slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
out:
	/* Whether or not the hardware delete ran, drop the SA from the
	 * bond's tracking list.
	 */
	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		if (ipsec->xs == xs) {
			list_del(&ipsec->list);
			kfree(ipsec);
			break;
		}
	}
	spin_unlock_bh(&bond->ipsec_lock);
	rcu_read_unlock();
}

/* Delete every currently-offloaded SA on bond->ipsec_list from the active
 * slave's hardware and mark each SA as no longer offloaded
 * (real_dev = NULL).  The list entries themselves are kept.
 */
static void bond_ipsec_del_sa_all(struct bonding *bond)
{
	struct net_device *bond_dev = bond->dev;
	struct bond_ipsec *ipsec;
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave) {
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		/* Skip SAs that were never programmed into hardware. */
		if (!ipsec->xs->xso.real_dev)
			continue;

		if (!slave->dev->xfrmdev_ops ||
		    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
		    netif_is_bond_master(slave->dev)) {
			slave_warn(bond_dev, slave->dev,
				   "%s: no slave xdo_dev_state_delete\n",
				   __func__);
		} else {
			slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
		}
		ipsec->xs->xso.real_dev = NULL;
	}
	spin_unlock_bh(&bond->ipsec_lock);
	rcu_read_unlock();
}

/**
 * bond_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
573 574 575
	struct net_device *real_dev;
	struct slave *curr_active;
	struct bonding *bond;
576
	int err;
577 578

	bond = netdev_priv(bond_dev);
579
	rcu_read_lock();
580 581
	curr_active = rcu_dereference(bond->curr_active_slave);
	real_dev = curr_active->dev;
582

583
	if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
584
		err = false;
585 586
		goto out;
	}
587

588 589 590 591
	if (!xs->xso.real_dev) {
		err = false;
		goto out;
	}
592 593 594 595

	if (!real_dev->xfrmdev_ops ||
	    !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
	    netif_is_bond_master(real_dev)) {
596 597
		err = false;
		goto out;
598 599
	}

600 601 602 603
	err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
out:
	rcu_read_unlock();
	return err;
604 605 606 607 608 609 610 611 612
}

/* xfrm hardware-offload callbacks installed on the bond net device. */
static const struct xfrmdev_ops bond_xfrmdev_ops = {
	.xdo_dev_state_add = bond_ipsec_add_sa,
	.xdo_dev_state_delete = bond_ipsec_del_sa,
	.xdo_dev_offload_ok = bond_ipsec_offload_ok,
};
#endif /* CONFIG_XFRM_OFFLOAD */

L
Linus Torvalds 已提交
613 614
/*------------------------------- Link status -------------------------------*/

615
/* Set the carrier state for the master according to the state of its
616 617 618 619 620
 * slaves.  If any slaves are up, the master is up.  In 802.3ad mode,
 * do special 802.3ad magic.
 *
 * Returns zero if carrier state does not change, nonzero if it does.
 */
621
int bond_set_carrier(struct bonding *bond)
622
{
623
	struct list_head *iter;
624 625
	struct slave *slave;

626
	if (!bond_has_slaves(bond))
627 628
		goto down;

629
	if (BOND_MODE(bond) == BOND_MODE_8023AD)
630 631
		return bond_3ad_set_carrier(bond);

632
	bond_for_each_slave(bond, slave, iter) {
633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649
		if (slave->link == BOND_LINK_UP) {
			if (!netif_carrier_ok(bond->dev)) {
				netif_carrier_on(bond->dev);
				return 1;
			}
			return 0;
		}
	}

down:
	if (netif_carrier_ok(bond->dev)) {
		netif_carrier_off(bond->dev);
		return 1;
	}
	return 0;
}

650
/* Get link speed and duplex from the slave's base driver
L
Linus Torvalds 已提交
651
 * using ethtool. If for some reason the call fails or the
652
 * values are invalid, set speed and duplex to -1,
653 654
 * and return. Return 1 if speed or duplex settings are
 * UNKNOWN; 0 otherwise.
L
Linus Torvalds 已提交
655
 */
656
static int bond_update_speed_duplex(struct slave *slave)
L
Linus Torvalds 已提交
657 658
{
	struct net_device *slave_dev = slave->dev;
659
	struct ethtool_link_ksettings ecmd;
660
	int res;
L
Linus Torvalds 已提交
661

662 663
	slave->speed = SPEED_UNKNOWN;
	slave->duplex = DUPLEX_UNKNOWN;
L
Linus Torvalds 已提交
664

665
	res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
666
	if (res < 0)
667
		return 1;
668
	if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
669
		return 1;
670
	switch (ecmd.base.duplex) {
L
Linus Torvalds 已提交
671 672 673 674
	case DUPLEX_FULL:
	case DUPLEX_HALF:
		break;
	default:
675
		return 1;
L
Linus Torvalds 已提交
676 677
	}

678 679
	slave->speed = ecmd.base.speed;
	slave->duplex = ecmd.base.duplex;
L
Linus Torvalds 已提交
680

681
	return 0;
L
Linus Torvalds 已提交
682 683
}

684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699
/* Human-readable name for a slave's BOND_LINK_* state. */
const char *bond_slave_link_status(s8 link)
{
	if (link == BOND_LINK_UP)
		return "up";
	if (link == BOND_LINK_FAIL)
		return "going down";
	if (link == BOND_LINK_DOWN)
		return "down";
	if (link == BOND_LINK_BACK)
		return "going back";
	return "unknown";
}

700
/* if <dev> supports MII link status reporting, check its link status.
 *
 * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
 * depending upon the setting of the use_carrier parameter.
 *
 * Return either BMSR_LSTATUS, meaning that the link is up (or we
 * can't tell and just pretend it is), or 0, meaning that the link is
 * down.
 *
 * If reporting is non-zero, instead of faking link up, return -1 if
 * both ETHTOOL and MII ioctls fail (meaning the device does not
 * support them).  If use_carrier is set, return whatever it says.
 * It'd be nice if there was a good way to tell if a driver supports
 * netif_carrier, but there really isn't.
 */
static int bond_check_dev_link(struct bonding *bond,
			       struct net_device *slave_dev, int reporting)
{
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	int (*ioctl)(struct net_device *, struct ifreq *, int);
	struct ifreq ifr;
	struct mii_ioctl_data *mii;

	/* A device that isn't running has no link, unless the caller
	 * demanded a definite answer (reporting != 0).
	 */
	if (!reporting && !netif_running(slave_dev))
		return 0;

	if (bond->params.use_carrier)
		return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;

	/* Try to get link status using Ethtool first. */
	if (slave_dev->ethtool_ops->get_link)
		return slave_dev->ethtool_ops->get_link(slave_dev) ?
			BMSR_LSTATUS : 0;

	/* Ethtool can't be used, fallback to MII ioctls. */
	ioctl = slave_ops->ndo_eth_ioctl;
	if (ioctl) {
		/* TODO: set pointer to correct ioctl on a per team member
		 *       bases to make this more efficient. that is, once
		 *       we determine the correct ioctl, we will always
		 *       call it and not the others for that team
		 *       member.
		 */

		/* We cannot assume that SIOCGMIIPHY will also read a
		 * register; not all network drivers (e.g., e100)
		 * support that.
		 */

		/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
		strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
		mii = if_mii(&ifr);
		if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
			mii->reg_num = MII_BMSR;
			if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
				return mii->val_out & BMSR_LSTATUS;
		}
	}

	/* If reporting, report that either there's no ndo_eth_ioctl,
	 * or both SIOCGMIIREG and get_link failed (meaning that we
	 * cannot report link status).  If not reporting, pretend
	 * we're ok.
	 */
	return reporting ? -1 : BMSR_LSTATUS;
}

/*----------------------------- Multicast list ------------------------------*/

769
/* Push the promiscuity flag down to appropriate slaves */
770
static int bond_set_promiscuity(struct bonding *bond, int inc)
L
Linus Torvalds 已提交
771
{
772
	struct list_head *iter;
773
	int err = 0;
774

775
	if (bond_uses_primary(bond)) {
776
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
777 778 779

		if (curr_active)
			err = dev_set_promiscuity(curr_active->dev, inc);
L
Linus Torvalds 已提交
780 781
	} else {
		struct slave *slave;
782

783
		bond_for_each_slave(bond, slave, iter) {
784 785 786
			err = dev_set_promiscuity(slave->dev, inc);
			if (err)
				return err;
L
Linus Torvalds 已提交
787 788
		}
	}
789
	return err;
L
Linus Torvalds 已提交
790 791
}

792
/* Push the allmulti flag down to all slaves */
793
static int bond_set_allmulti(struct bonding *bond, int inc)
L
Linus Torvalds 已提交
794
{
795
	struct list_head *iter;
796
	int err = 0;
797

798
	if (bond_uses_primary(bond)) {
799
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
800 801 802

		if (curr_active)
			err = dev_set_allmulti(curr_active->dev, inc);
L
Linus Torvalds 已提交
803 804
	} else {
		struct slave *slave;
805

806
		bond_for_each_slave(bond, slave, iter) {
807 808 809
			err = dev_set_allmulti(slave->dev, inc);
			if (err)
				return err;
L
Linus Torvalds 已提交
810 811
		}
	}
812
	return err;
L
Linus Torvalds 已提交
813 814
}

815
/* Retrieve the list of registered multicast addresses for the bonding
 * device and retransmit an IGMP JOIN request to the current active
 * slave.
 */
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mcast_work.work);

	/* The notifier chain must run under RTNL; if it is contended,
	 * reschedule shortly instead of blocking the workqueue.
	 */
	if (!rtnl_trylock()) {
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);

	/* Re-arm until the configured retransmission count is exhausted. */
	if (bond->igmp_retrans > 1) {
		bond->igmp_retrans--;
		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
	}
	rtnl_unlock();
}

837
/* Flush bond's hardware addresses from slave */
838
static void bond_hw_addr_flush(struct net_device *bond_dev,
S
Stephen Hemminger 已提交
839
			       struct net_device *slave_dev)
L
Linus Torvalds 已提交
840
{
841
	struct bonding *bond = netdev_priv(bond_dev);
L
Linus Torvalds 已提交
842

843 844
	dev_uc_unsync(slave_dev, bond_dev);
	dev_mc_unsync(slave_dev, bond_dev);
L
Linus Torvalds 已提交
845

846
	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
L
Linus Torvalds 已提交
847 848 849
		/* del lacpdu mc addr from mc list */
		u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;

850
		dev_mc_del(slave_dev, lacpdu_multicast);
L
Linus Torvalds 已提交
851 852 853 854 855
	}
}

/*--------------------------- Active slave change ---------------------------*/

856
/* Update the hardware address list and promisc/allmulti for the new and
 * old active slaves (if any).  Modes that are not using primary keep all
 * slaves up date at all times; only the modes that use primary need to call
 * this function to swap these settings during a failover.
 */
static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
			      struct slave *old_active)
{
	if (old_active) {
		/* Drop the promisc/allmulti references the old active
		 * slave held on behalf of the bond's flags.
		 */
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(old_active->dev, -1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(old_active->dev, -1);

		bond_hw_addr_flush(bond->dev, old_active->dev);
	}

	if (new_active) {
		/* FIXME: Signal errors upstream. */
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(new_active->dev, 1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(new_active->dev, 1);

		/* Mirror the bond's unicast/multicast filters onto the
		 * new active slave while holding the address-list lock.
		 */
		netif_addr_lock_bh(bond->dev);
		dev_uc_sync(new_active->dev, bond->dev);
		dev_mc_sync(new_active->dev, bond->dev);
		netif_addr_unlock_bh(bond->dev);
	}
}

889 890 891 892 893 894 895
/**
 * bond_set_dev_addr - clone slave's address to bond
 * @bond_dev: bond net device
 * @slave_dev: slave net device
 *
 * Should be called with RTNL held.
 */
896 897
static int bond_set_dev_addr(struct net_device *bond_dev,
			     struct net_device *slave_dev)
898
{
899 900
	int err;

901 902
	slave_dbg(bond_dev, slave_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
		  bond_dev, slave_dev, slave_dev->addr_len);
903 904 905 906
	err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
	if (err)
		return err;

907 908 909
	memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
	bond_dev->addr_assign_type = NET_ADDR_STOLEN;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
910
	return 0;
911 912
}

913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929
static struct slave *bond_get_old_active(struct bonding *bond,
					 struct slave *new_active)
{
	struct slave *slave;
	struct list_head *iter;

	bond_for_each_slave(bond, slave, iter) {
		if (slave == new_active)
			continue;

		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
			return slave;
	}

	return NULL;
}

930
/* bond_do_fail_over_mac
931 932 933
 *
 * Perform special MAC address swapping for fail_over_mac settings
 *
934
 * Called with RTNL
935 936 937 938 939
 */
static void bond_do_fail_over_mac(struct bonding *bond,
				  struct slave *new_active,
				  struct slave *old_active)
{
940 941
	u8 tmp_mac[MAX_ADDR_LEN];
	struct sockaddr_storage ss;
942 943 944 945
	int rv;

	switch (bond->params.fail_over_mac) {
	case BOND_FOM_ACTIVE:
946 947 948
		if (new_active) {
			rv = bond_set_dev_addr(bond->dev, new_active->dev);
			if (rv)
949 950
				slave_err(bond->dev, new_active->dev, "Error %d setting bond MAC from slave\n",
					  -rv);
951
		}
952 953
		break;
	case BOND_FOM_FOLLOW:
954
		/* if new_active && old_active, swap them
955 956 957 958 959 960
		 * if just old_active, do nothing (going to no active slave)
		 * if just new_active, set new_active to bond's MAC
		 */
		if (!new_active)
			return;

961 962 963
		if (!old_active)
			old_active = bond_get_old_active(bond, new_active);

964
		if (old_active) {
965 966 967 968 969 970
			bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr,
					  new_active->dev->addr_len);
			bond_hw_addr_copy(ss.__data,
					  old_active->dev->dev_addr,
					  old_active->dev->addr_len);
			ss.ss_family = new_active->dev->type;
971
		} else {
972 973 974
			bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
					  bond->dev->addr_len);
			ss.ss_family = bond->dev->type;
975 976
		}

977
		rv = dev_set_mac_address(new_active->dev,
978
					 (struct sockaddr *)&ss, NULL);
979
		if (rv) {
980 981
			slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
				  -rv);
982 983 984 985 986 987
			goto out;
		}

		if (!old_active)
			goto out;

988 989 990
		bond_hw_addr_copy(ss.__data, tmp_mac,
				  new_active->dev->addr_len);
		ss.ss_family = old_active->dev->type;
991

992
		rv = dev_set_mac_address(old_active->dev,
993
					 (struct sockaddr *)&ss, NULL);
994
		if (rv)
995 996
			slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
				  -rv);
997 998 999
out:
		break;
	default:
1000 1001
		netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
			   bond->params.fail_over_mac);
1002 1003 1004 1005 1006
		break;
	}

}

1007
static struct slave *bond_choose_primary_or_current(struct bonding *bond)
1008
{
1009
	struct slave *prim = rtnl_dereference(bond->primary_slave);
1010
	struct slave *curr = rtnl_dereference(bond->curr_active_slave);
1011

1012 1013 1014 1015 1016 1017
	if (!prim || prim->link != BOND_LINK_UP) {
		if (!curr || curr->link != BOND_LINK_UP)
			return NULL;
		return curr;
	}

1018 1019
	if (bond->force_primary) {
		bond->force_primary = false;
1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041
		return prim;
	}

	if (!curr || curr->link != BOND_LINK_UP)
		return prim;

	/* At this point, prim and curr are both up */
	switch (bond->params.primary_reselect) {
	case BOND_PRI_RESELECT_ALWAYS:
		return prim;
	case BOND_PRI_RESELECT_BETTER:
		if (prim->speed < curr->speed)
			return curr;
		if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
			return curr;
		return prim;
	case BOND_PRI_RESELECT_FAILURE:
		return curr;
	default:
		netdev_err(bond->dev, "impossible primary_reselect %d\n",
			   bond->params.primary_reselect);
		return curr;
1042 1043
	}
}
1044

L
Linus Torvalds 已提交
1045
/**
1046
 * bond_find_best_slave - select the best available slave to be the active one
L
Linus Torvalds 已提交
1047 1048 1049 1050
 * @bond: our bonding struct
 */
static struct slave *bond_find_best_slave(struct bonding *bond)
{
1051
	struct slave *slave, *bestslave = NULL;
1052
	struct list_head *iter;
L
Linus Torvalds 已提交
1053 1054
	int mintime = bond->params.updelay;

1055 1056 1057
	slave = bond_choose_primary_or_current(bond);
	if (slave)
		return slave;
L
Linus Torvalds 已提交
1058

1059 1060 1061
	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP)
			return slave;
1062
		if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
1063 1064 1065
		    slave->delay < mintime) {
			mintime = slave->delay;
			bestslave = slave;
L
Linus Torvalds 已提交
1066 1067 1068 1069 1070 1071
		}
	}

	return bestslave;
}

1072 1073
static bool bond_should_notify_peers(struct bonding *bond)
{
1074 1075 1076 1077 1078
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	rcu_read_unlock();
1079

1080 1081
	netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
		   slave ? slave->dev->name : "NULL");
1082 1083

	if (!slave || !bond->send_peer_notif ||
1084 1085
	    bond->send_peer_notif %
	    max(1, bond->params.peer_notif_delay) != 0 ||
1086
	    !netif_carrier_ok(bond->dev) ||
1087 1088 1089 1090 1091 1092
	    test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
		return false;

	return true;
}

L
Linus Torvalds 已提交
1093
/**
1094
 * bond_change_active_slave - change the active slave into the specified one
L
Linus Torvalds 已提交
1095
 * @bond: our bonding struct
1096
 * @new_active: the new slave to make the active one
L
Linus Torvalds 已提交
1097 1098 1099 1100 1101 1102 1103 1104 1105
 *
 * Set the new slave to the bond's settings and unset them on the old
 * curr_active_slave.
 * Setting include flags, mc-list, promiscuity, allmulti, etc.
 *
 * If @new's link state is %BOND_LINK_BACK we'll set it to %BOND_LINK_UP,
 * because it is apparently the best available slave we have, even though its
 * updelay hasn't timed out yet.
 *
1106
 * Caller must hold RTNL.
L
Linus Torvalds 已提交
1107
 */
1108
void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
L
Linus Torvalds 已提交
1109
{
1110 1111
	struct slave *old_active;

1112 1113 1114
	ASSERT_RTNL();

	old_active = rtnl_dereference(bond->curr_active_slave);
L
Linus Torvalds 已提交
1115

S
Stephen Hemminger 已提交
1116
	if (old_active == new_active)
L
Linus Torvalds 已提交
1117 1118
		return;

1119
#ifdef CONFIG_XFRM_OFFLOAD
1120
	bond_ipsec_del_sa_all(bond);
1121 1122
#endif /* CONFIG_XFRM_OFFLOAD */

1123
	if (new_active) {
1124
		new_active->last_link_up = jiffies;
1125

L
Linus Torvalds 已提交
1126
		if (new_active->link == BOND_LINK_BACK) {
1127
			if (bond_uses_primary(bond)) {
1128 1129
				slave_info(bond->dev, new_active->dev, "making interface the new active one %d ms earlier\n",
					   (bond->params.updelay - new_active->delay) * bond->params.miimon);
L
Linus Torvalds 已提交
1130 1131 1132
			}

			new_active->delay = 0;
1133 1134
			bond_set_slave_link_state(new_active, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);
L
Linus Torvalds 已提交
1135

1136
			if (BOND_MODE(bond) == BOND_MODE_8023AD)
L
Linus Torvalds 已提交
1137 1138
				bond_3ad_handle_link_change(new_active, BOND_LINK_UP);

1139
			if (bond_is_lb(bond))
L
Linus Torvalds 已提交
1140 1141
				bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
		} else {
1142
			if (bond_uses_primary(bond))
1143
				slave_info(bond->dev, new_active->dev, "making interface the new active one\n");
L
Linus Torvalds 已提交
1144 1145 1146
		}
	}

1147
	if (bond_uses_primary(bond))
1148
		bond_hw_addr_swap(bond, new_active, old_active);
L
Linus Torvalds 已提交
1149

1150
	if (bond_is_lb(bond)) {
L
Linus Torvalds 已提交
1151
		bond_alb_handle_active_change(bond, new_active);
1152
		if (old_active)
1153 1154
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);
1155
		if (new_active)
1156 1157
			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);
L
Linus Torvalds 已提交
1158
	} else {
1159
		rcu_assign_pointer(bond->curr_active_slave, new_active);
L
Linus Torvalds 已提交
1160
	}
J
Jay Vosburgh 已提交
1161

1162
	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
S
Stephen Hemminger 已提交
1163
		if (old_active)
1164 1165
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);
J
Jay Vosburgh 已提交
1166 1167

		if (new_active) {
1168 1169
			bool should_notify_peers = false;

1170 1171
			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);
1172

1173 1174 1175
			if (bond->params.fail_over_mac)
				bond_do_fail_over_mac(bond, new_active,
						      old_active);
1176

1177 1178
			if (netif_running(bond->dev)) {
				bond->send_peer_notif =
1179 1180
					bond->params.num_peer_notif *
					max(1, bond->params.peer_notif_delay);
1181 1182 1183 1184
				should_notify_peers =
					bond_should_notify_peers(bond);
			}

1185
			call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
1186 1187
			if (should_notify_peers) {
				bond->send_peer_notif--;
1188 1189
				call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
							 bond->dev);
1190
			}
1191
		}
J
Jay Vosburgh 已提交
1192
	}
1193

1194
#ifdef CONFIG_XFRM_OFFLOAD
1195
	bond_ipsec_add_sa_all(bond);
1196 1197
#endif /* CONFIG_XFRM_OFFLOAD */

1198
	/* resend IGMP joins since active slave has changed or
1199 1200
	 * all were sent on curr_active_slave.
	 * resend only if bond is brought up with the affected
1201 1202
	 * bonding modes and the retransmission is enabled
	 */
1203
	if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
1204
	    ((bond_uses_primary(bond) && new_active) ||
1205
	     BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
1206
		bond->igmp_retrans = bond->params.resend_igmp;
1207
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
1208
	}
L
Linus Torvalds 已提交
1209 1210 1211 1212 1213 1214
}

/**
 * bond_select_active_slave - select a new active slave, if needed
 * @bond: our bonding struct
 *
S
Stephen Hemminger 已提交
1215
 * This functions should be called when one of the following occurs:
L
Linus Torvalds 已提交
1216 1217 1218 1219
 * - The old curr_active_slave has been released or lost its link.
 * - The primary_slave has got its link back.
 * - A slave has got its link back and there's no old curr_active_slave.
 *
1220
 * Caller must hold RTNL.
L
Linus Torvalds 已提交
1221
 */
1222
void bond_select_active_slave(struct bonding *bond)
L
Linus Torvalds 已提交
1223 1224
{
	struct slave *best_slave;
1225
	int rv;
L
Linus Torvalds 已提交
1226

1227 1228
	ASSERT_RTNL();

L
Linus Torvalds 已提交
1229
	best_slave = bond_find_best_slave(bond);
1230
	if (best_slave != rtnl_dereference(bond->curr_active_slave)) {
L
Linus Torvalds 已提交
1231
		bond_change_active_slave(bond, best_slave);
1232 1233 1234 1235
		rv = bond_set_carrier(bond);
		if (!rv)
			return;

Z
Zhang Shengju 已提交
1236
		if (netif_carrier_ok(bond->dev))
1237
			netdev_info(bond->dev, "active interface up!\n");
Z
Zhang Shengju 已提交
1238
		else
1239
			netdev_info(bond->dev, "now running without any active interface!\n");
L
Linus Torvalds 已提交
1240 1241 1242
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Attach a netpoll instance to one slave. */
static inline int slave_enable_netpoll(struct slave *slave)
{
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, slave->dev);
	if (err) {
		kfree(np);
		return err;
	}

	slave->np = np;
	return 0;
}

/* Detach and free a slave's netpoll instance, if any. */
static inline void slave_disable_netpoll(struct slave *slave)
{
	struct netpoll *np = slave->np;

	if (!np)
		return;

	slave->np = NULL;

	__netpoll_free(np);
}

static void bond_poll_controller(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave = NULL;
	struct list_head *iter;
	struct ad_info ad_info;

	/* In 802.3ad mode only poll slaves of the active aggregator. */
	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		if (bond_3ad_get_active_agg_info(bond, &ad_info))
			return;

	bond_for_each_slave_rcu(bond, slave, iter) {
		if (!bond_slave_is_up(slave))
			continue;

		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			struct aggregator *agg =
			    SLAVE_AD_INFO(slave)->port.aggregator;

			if (agg &&
			    agg->aggregator_identifier != ad_info.aggregator_id)
				continue;
		}

		netpoll_poll_dev(slave->dev);
	}
}

static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		if (bond_slave_is_up(slave))
			slave_disable_netpoll(slave);
}

static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	struct slave *slave;
	int err = 0;

	bond_for_each_slave(bond, slave, iter) {
		err = slave_enable_netpoll(slave);
		if (err) {
			/* Roll back the slaves that were already set up. */
			bond_netpoll_cleanup(dev);
			break;
		}
	}
	return err;
}
#else
static inline int slave_enable_netpoll(struct slave *slave)
{
	return 0;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
}
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
}
#endif

L
Linus Torvalds 已提交
1343 1344
/*---------------------------------- IOCTL ----------------------------------*/

1345
static netdev_features_t bond_fix_features(struct net_device *dev,
1346
					   netdev_features_t features)
1347
{
1348
	struct bonding *bond = netdev_priv(dev);
1349
	struct list_head *iter;
1350
	netdev_features_t mask;
1351
	struct slave *slave;
1352

1353 1354 1355 1356 1357 1358 1359
#if IS_ENABLED(CONFIG_TLS_DEVICE)
	if (bond_sk_check(bond))
		features |= BOND_TLS_FEATURES;
	else
		features &= ~BOND_TLS_FEATURES;
#endif

1360
	mask = features;
1361

1362
	features &= ~NETIF_F_ONE_FOR_ALL;
1363
	features |= NETIF_F_ALL_FOR_ALL;
1364

1365
	bond_for_each_slave(bond, slave, iter) {
1366 1367
		features = netdev_increment_features(features,
						     slave->dev->features,
1368 1369
						     mask);
	}
1370
	features = netdev_add_tso_features(features, mask);
1371 1372 1373 1374

	return features;
}

1375
#define BOND_VLAN_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
1376
				 NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
1377
				 NETIF_F_HIGHDMA | NETIF_F_LRO)
1378

1379
#define BOND_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
1380
				 NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
1381

1382
#define BOND_MPLS_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
1383
				 NETIF_F_GSO_SOFTWARE)
1384

1385

1386 1387
static void bond_compute_features(struct bonding *bond)
{
1388 1389
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;
1390
	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
1391
	netdev_features_t enc_features  = BOND_ENC_FEATURES;
1392 1393 1394
#ifdef CONFIG_XFRM_OFFLOAD
	netdev_features_t xfrm_features  = BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
1395
	netdev_features_t mpls_features  = BOND_MPLS_FEATURES;
1396 1397 1398
	struct net_device *bond_dev = bond->dev;
	struct list_head *iter;
	struct slave *slave;
1399
	unsigned short max_hard_header_len = ETH_HLEN;
1400 1401
	unsigned int gso_max_size = GSO_MAX_SIZE;
	u16 gso_max_segs = GSO_MAX_SEGS;
1402

1403
	if (!bond_has_slaves(bond))
1404
		goto done;
1405
	vlan_features &= NETIF_F_ALL_FOR_ALL;
1406
	mpls_features &= NETIF_F_ALL_FOR_ALL;
1407

1408
	bond_for_each_slave(bond, slave, iter) {
1409
		vlan_features = netdev_increment_features(vlan_features,
1410 1411
			slave->dev->vlan_features, BOND_VLAN_FEATURES);

1412 1413 1414
		enc_features = netdev_increment_features(enc_features,
							 slave->dev->hw_enc_features,
							 BOND_ENC_FEATURES);
1415

1416 1417 1418 1419 1420 1421
#ifdef CONFIG_XFRM_OFFLOAD
		xfrm_features = netdev_increment_features(xfrm_features,
							  slave->dev->hw_enc_features,
							  BOND_XFRM_FEATURES);
#endif /* CONFIG_XFRM_OFFLOAD */

1422 1423 1424 1425
		mpls_features = netdev_increment_features(mpls_features,
							  slave->dev->mpls_features,
							  BOND_MPLS_FEATURES);

1426
		dst_release_flag &= slave->dev->priv_flags;
1427 1428
		if (slave->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = slave->dev->hard_header_len;
1429 1430 1431

		gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
		gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
1432
	}
1433
	bond_dev->hard_header_len = max_hard_header_len;
1434

1435
done:
1436
	bond_dev->vlan_features = vlan_features;
1437
	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
1438
				    NETIF_F_HW_VLAN_CTAG_TX |
1439
				    NETIF_F_HW_VLAN_STAG_TX;
1440 1441 1442
#ifdef CONFIG_XFRM_OFFLOAD
	bond_dev->hw_enc_features |= xfrm_features;
#endif /* CONFIG_XFRM_OFFLOAD */
1443
	bond_dev->mpls_features = mpls_features;
1444 1445
	bond_dev->gso_max_segs = gso_max_segs;
	netif_set_gso_max_size(bond_dev, gso_max_size);
1446

1447 1448 1449 1450
	bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
	    dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;
1451

1452
	netdev_change_features(bond_dev);
1453 1454
}

1455 1456 1457
static void bond_setup_by_slave(struct net_device *bond_dev,
				struct net_device *slave_dev)
{
1458
	bond_dev->header_ops	    = slave_dev->header_ops;
1459 1460 1461

	bond_dev->type		    = slave_dev->type;
	bond_dev->hard_header_len   = slave_dev->hard_header_len;
1462
	bond_dev->needed_headroom   = slave_dev->needed_headroom;
1463 1464 1465 1466 1467 1468
	bond_dev->addr_len	    = slave_dev->addr_len;

	memcpy(bond_dev->broadcast, slave_dev->broadcast,
		slave_dev->addr_len);
}

1469
/* On bonding slaves other than the currently active slave, suppress
1470
 * duplicates except for alb non-mcast/bcast.
1471 1472
 */
static bool bond_should_deliver_exact_match(struct sk_buff *skb,
1473 1474
					    struct slave *slave,
					    struct bonding *bond)
1475
{
1476
	if (bond_is_slave_inactive(slave)) {
1477
		if (BOND_MODE(bond) == BOND_MODE_ALB &&
1478 1479 1480 1481 1482 1483 1484 1485
		    skb->pkt_type != PACKET_BROADCAST &&
		    skb->pkt_type != PACKET_MULTICAST)
			return false;
		return true;
	}
	return false;
}

1486
static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1487
{
1488
	struct sk_buff *skb = *pskb;
1489
	struct slave *slave;
1490
	struct bonding *bond;
1491 1492
	int (*recv_probe)(const struct sk_buff *, struct bonding *,
			  struct slave *);
1493
	int ret = RX_HANDLER_ANOTHER;
1494

1495 1496 1497 1498 1499
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;

	*pskb = skb;
1500

J
Jiri Pirko 已提交
1501 1502
	slave = bond_slave_get_rcu(skb->dev);
	bond = slave->bond;
1503

1504
	recv_probe = READ_ONCE(bond->recv_probe);
1505
	if (recv_probe) {
1506 1507 1508 1509
		ret = recv_probe(skb, bond, slave);
		if (ret == RX_HANDLER_CONSUMED) {
			consume_skb(skb);
			return ret;
1510 1511 1512
		}
	}

1513 1514 1515 1516 1517 1518 1519 1520 1521 1522
	/*
	 * For packets determined by bond_should_deliver_exact_match() call to
	 * be suppressed we want to make an exception for link-local packets.
	 * This is necessary for e.g. LLDP daemons to be able to monitor
	 * inactive slave links without being forced to bind to them
	 * explicitly.
	 *
	 * At the same time, packets that are passed to the bonding master
	 * (including link-local ones) can have their originating interface
	 * determined via PACKET_ORIGDEV socket option.
1523
	 */
1524 1525 1526
	if (bond_should_deliver_exact_match(skb, slave, bond)) {
		if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
			return RX_HANDLER_PASS;
1527
		return RX_HANDLER_EXACT;
1528
	}
1529

J
Jiri Pirko 已提交
1530
	skb->dev = bond->dev;
1531

1532
	if (BOND_MODE(bond) == BOND_MODE_ALB &&
1533
	    netif_is_bridge_port(bond->dev) &&
1534 1535
	    skb->pkt_type == PACKET_HOST) {

1536 1537 1538
		if (unlikely(skb_cow_head(skb,
					  skb->data - skb_mac_header(skb)))) {
			kfree_skb(skb);
1539
			return RX_HANDLER_CONSUMED;
1540
		}
1541 1542
		bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
				  bond->dev->addr_len);
1543 1544
	}

1545
	return ret;
1546 1547
}

1548
static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
1549
{
1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564
	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
		return NETDEV_LAG_TX_TYPE_ROUNDROBIN;
	case BOND_MODE_ACTIVEBACKUP:
		return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP;
	case BOND_MODE_BROADCAST:
		return NETDEV_LAG_TX_TYPE_BROADCAST;
	case BOND_MODE_XOR:
	case BOND_MODE_8023AD:
		return NETDEV_LAG_TX_TYPE_HASH;
	default:
		return NETDEV_LAG_TX_TYPE_UNKNOWN;
	}
}

1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581
static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
					       enum netdev_lag_tx_type type)
{
	if (type != NETDEV_LAG_TX_TYPE_HASH)
		return NETDEV_LAG_HASH_NONE;

	switch (bond->params.xmit_policy) {
	case BOND_XMIT_POLICY_LAYER2:
		return NETDEV_LAG_HASH_L2;
	case BOND_XMIT_POLICY_LAYER34:
		return NETDEV_LAG_HASH_L34;
	case BOND_XMIT_POLICY_LAYER23:
		return NETDEV_LAG_HASH_L23;
	case BOND_XMIT_POLICY_ENCAP23:
		return NETDEV_LAG_HASH_E23;
	case BOND_XMIT_POLICY_ENCAP34:
		return NETDEV_LAG_HASH_E34;
1582 1583
	case BOND_XMIT_POLICY_VLAN_SRCMAC:
		return NETDEV_LAG_HASH_VLAN_SRCMAC;
1584 1585 1586 1587 1588
	default:
		return NETDEV_LAG_HASH_UNKNOWN;
	}
}

1589 1590
static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
				      struct netlink_ext_ack *extack)
1591 1592
{
	struct netdev_lag_upper_info lag_upper_info;
1593
	enum netdev_lag_tx_type type;
1594

1595 1596 1597
	type = bond_lag_tx_type(bond);
	lag_upper_info.tx_type = type;
	lag_upper_info.hash_type = bond_lag_hash_type(bond, type);
1598 1599 1600

	return netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
					    &lag_upper_info, extack);
1601 1602
}

1603
static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
1604
{
1605 1606
	netdev_upper_dev_unlink(slave->dev, bond->dev);
	slave->dev->flags &= ~IFF_SLAVE;
1607 1608
}

1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641
static void slave_kobj_release(struct kobject *kobj)
{
	struct slave *slave = to_slave(kobj);
	struct bonding *bond = bond_get_bond_by_slave(slave);

	cancel_delayed_work_sync(&slave->notify_work);
	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		kfree(SLAVE_AD_INFO(slave));

	kfree(slave);
}

static struct kobj_type slave_ktype = {
	.release = slave_kobj_release,
#ifdef CONFIG_SYSFS
	.sysfs_ops = &slave_sysfs_ops,
#endif
};

static int bond_kobj_init(struct slave *slave)
{
	int err;

	err = kobject_init_and_add(&slave->kobj, &slave_ktype,
				   &(slave->dev->dev.kobj), "bonding_slave");
	if (err)
		kobject_put(&slave->kobj);

	return err;
}

static struct slave *bond_alloc_slave(struct bonding *bond,
				      struct net_device *slave_dev)
1642 1643 1644
{
	struct slave *slave = NULL;

Z
Zhang Shengju 已提交
1645
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
1646 1647 1648
	if (!slave)
		return NULL;

1649 1650
	slave->bond = bond;
	slave->dev = slave_dev;
1651
	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
1652 1653 1654 1655

	if (bond_kobj_init(slave))
		return NULL;

1656
	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1657 1658 1659
		SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
					       GFP_KERNEL);
		if (!SLAVE_AD_INFO(slave)) {
1660
			kobject_put(&slave->kobj);
1661 1662 1663
			return NULL;
		}
	}
1664

1665 1666 1667
	return slave;
}

1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682
static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
{
	info->bond_mode = BOND_MODE(bond);
	info->miimon = bond->params.miimon;
	info->num_slaves = bond->slave_cnt;
}

static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
{
	strcpy(info->slave_name, slave->dev->name);
	info->link = slave->link;
	info->state = bond_slave_state(slave);
	info->link_failure_count = slave->link_failure_count;
}

1683 1684
static void bond_netdev_notify_work(struct work_struct *_work)
{
1685 1686 1687 1688 1689
	struct slave *slave = container_of(_work, struct slave,
					   notify_work.work);

	if (rtnl_trylock()) {
		struct netdev_bonding_info binfo;
1690

1691 1692 1693 1694 1695 1696 1697
		bond_fill_ifslave(slave, &binfo.slave);
		bond_fill_ifbond(slave->bond, &binfo.master);
		netdev_bonding_info_change(slave->dev, &binfo);
		rtnl_unlock();
	} else {
		queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
	}
1698 1699 1700 1701
}

void bond_queue_slave_event(struct slave *slave)
{
1702
	queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
1703 1704
}

1705 1706 1707 1708 1709 1710 1711 1712 1713 1714
void bond_lower_state_changed(struct slave *slave)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = slave->link == BOND_LINK_UP ||
		       slave->link == BOND_LINK_FAIL;
	info.tx_enabled = bond_is_active_slave(slave);
	netdev_lower_state_changed(slave->dev, &info);
}

L
Linus Torvalds 已提交
1715
/* enslave device <slave> to bond device <master> */
D
David Ahern 已提交
1716 1717
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
		 struct netlink_ext_ack *extack)
L
Linus Torvalds 已提交
1718
{
1719
	struct bonding *bond = netdev_priv(bond_dev);
1720
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
1721
	struct slave *new_slave = NULL, *prev_slave;
1722
	struct sockaddr_storage ss;
L
Linus Torvalds 已提交
1723
	int link_reporting;
1724
	int res = 0, i;
L
Linus Torvalds 已提交
1725

1726 1727 1728
	if (slave_dev->flags & IFF_MASTER &&
	    !netif_is_bond_master(slave_dev)) {
		NL_SET_ERR_MSG(extack, "Device with IFF_MASTER cannot be enslaved");
1729 1730 1731 1732 1733
		netdev_err(bond_dev,
			   "Error: Device with IFF_MASTER cannot be enslaved\n");
		return -EPERM;
	}

1734 1735
	if (!bond->params.use_carrier &&
	    slave_dev->ethtool_ops->get_link == NULL &&
1736
	    slave_ops->ndo_eth_ioctl == NULL) {
1737
		slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
L
Linus Torvalds 已提交
1738 1739
	}

M
Mahesh Bandewar 已提交
1740 1741
	/* already in-use? */
	if (netdev_is_rx_handler_busy(slave_dev)) {
1742
		NL_SET_ERR_MSG(extack, "Device is in use and cannot be enslaved");
1743 1744
		slave_err(bond_dev, slave_dev,
			  "Error: Device is in use and cannot be enslaved\n");
L
Linus Torvalds 已提交
1745 1746 1747
		return -EBUSY;
	}

1748
	if (bond_dev == slave_dev) {
1749
		NL_SET_ERR_MSG(extack, "Cannot enslave bond to itself.");
1750
		netdev_err(bond_dev, "cannot enslave bond to itself.\n");
1751 1752 1753
		return -EPERM;
	}

L
Linus Torvalds 已提交
1754 1755 1756
	/* vlan challenged mutual exclusion */
	/* no need to lock since we're protected by rtnl_lock */
	if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
1757
		slave_dbg(bond_dev, slave_dev, "is NETIF_F_VLAN_CHALLENGED\n");
1758
		if (vlan_uses_dev(bond_dev)) {
1759
			NL_SET_ERR_MSG(extack, "Can not enslave VLAN challenged device to VLAN enabled bond");
1760
			slave_err(bond_dev, slave_dev, "Error: cannot enslave VLAN challenged slave on VLAN enabled bond\n");
L
Linus Torvalds 已提交
1761 1762
			return -EPERM;
		} else {
1763
			slave_warn(bond_dev, slave_dev, "enslaved VLAN challenged slave. Adding VLANs will be blocked as long as it is part of bond.\n");
L
Linus Torvalds 已提交
1764 1765
		}
	} else {
1766
		slave_dbg(bond_dev, slave_dev, "is !NETIF_F_VLAN_CHALLENGED\n");
L
Linus Torvalds 已提交
1767 1768
	}

1769 1770 1771
	if (slave_dev->features & NETIF_F_HW_ESP)
		slave_dbg(bond_dev, slave_dev, "is esp-hw-offload capable\n");

1772
	/* Old ifenslave binaries are no longer supported.  These can
S
Stephen Hemminger 已提交
1773
	 * be identified with moderate accuracy by the state of the slave:
1774 1775 1776
	 * the current ifenslave will set the interface down prior to
	 * enslaving it; the old ifenslave will not.
	 */
Y
yzhu1 已提交
1777
	if (slave_dev->flags & IFF_UP) {
1778
		NL_SET_ERR_MSG(extack, "Device can not be enslaved while up");
1779
		slave_err(bond_dev, slave_dev, "slave is up - this may be due to an out of date ifenslave\n");
1780
		return -EPERM;
1781
	}
L
Linus Torvalds 已提交
1782

1783 1784 1785 1786 1787 1788 1789
	/* set bonding device ether type by slave - bonding netdevices are
	 * created with ether_setup, so when the slave type is not ARPHRD_ETHER
	 * there is a need to override some of the type dependent attribs/funcs.
	 *
	 * bond ether type mutual exclusion - don't allow slaves of dissimilar
	 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
	 */
1790
	if (!bond_has_slaves(bond)) {
1791
		if (bond_dev->type != slave_dev->type) {
1792 1793
			slave_dbg(bond_dev, slave_dev, "change device type from %d to %d\n",
				  bond_dev->type, slave_dev->type);
1794

1795 1796
			res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
						       bond_dev);
1797 1798
			res = notifier_to_errno(res);
			if (res) {
1799
				slave_err(bond_dev, slave_dev, "refused to change device type\n");
1800
				return -EBUSY;
1801
			}
1802

1803
			/* Flush unicast and multicast addresses */
1804
			dev_uc_flush(bond_dev);
1805
			dev_mc_flush(bond_dev);
1806

1807 1808
			if (slave_dev->type != ARPHRD_ETHER)
				bond_setup_by_slave(bond_dev, slave_dev);
1809
			else {
1810
				ether_setup(bond_dev);
1811 1812
				bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
			}
1813

1814 1815
			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
						 bond_dev);
1816
		}
1817
	} else if (bond_dev->type != slave_dev->type) {
1818
		NL_SET_ERR_MSG(extack, "Device type is different from other slaves");
1819 1820
		slave_err(bond_dev, slave_dev, "ether type (%d) is different from other slaves (%d), can not enslave it\n",
			  slave_dev->type, bond_dev->type);
1821
		return -EINVAL;
1822 1823
	}

1824 1825
	if (slave_dev->type == ARPHRD_INFINIBAND &&
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1826
		NL_SET_ERR_MSG(extack, "Only active-backup mode is supported for infiniband slaves");
1827 1828
		slave_warn(bond_dev, slave_dev, "Type (%d) supports only active-backup mode\n",
			   slave_dev->type);
1829 1830 1831 1832 1833 1834
		res = -EOPNOTSUPP;
		goto err_undo_flags;
	}

	if (!slave_ops->ndo_set_mac_address ||
	    slave_dev->type == ARPHRD_INFINIBAND) {
1835
		slave_warn(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address\n");
1836 1837 1838
		if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
		    bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
			if (!bond_has_slaves(bond)) {
1839
				bond->params.fail_over_mac = BOND_FOM_ACTIVE;
1840
				slave_warn(bond_dev, slave_dev, "Setting fail_over_mac to active for active-backup mode\n");
1841
			} else {
1842
				NL_SET_ERR_MSG(extack, "Slave device does not support setting the MAC address, but fail_over_mac is not set to active");
1843
				slave_err(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n");
1844 1845
				res = -EOPNOTSUPP;
				goto err_undo_flags;
1846
			}
1847
		}
L
Linus Torvalds 已提交
1848 1849
	}

1850 1851
	call_netdevice_notifiers(NETDEV_JOIN, slave_dev);

1852
	/* If this is the first slave, then we need to set the master's hardware
1853 1854
	 * address to be the same as the slave's.
	 */
1855
	if (!bond_has_slaves(bond) &&
1856 1857 1858 1859 1860
	    bond->dev->addr_assign_type == NET_ADDR_RANDOM) {
		res = bond_set_dev_addr(bond->dev, slave_dev);
		if (res)
			goto err_undo_flags;
	}
1861

1862
	new_slave = bond_alloc_slave(bond, slave_dev);
L
Linus Torvalds 已提交
1863 1864 1865 1866
	if (!new_slave) {
		res = -ENOMEM;
		goto err_undo_flags;
	}
1867

1868
	/* Set the new_slave's queue_id to be zero.  Queue ID mapping
1869 1870 1871 1872
	 * is set via sysfs or module option if desired.
	 */
	new_slave->queue_id = 0;

1873 1874 1875 1876
	/* Save slave's original mtu and then set it to match the bond */
	new_slave->original_mtu = slave_dev->mtu;
	res = dev_set_mtu(slave_dev, bond->dev->mtu);
	if (res) {
1877
		slave_err(bond_dev, slave_dev, "Error %d calling dev_set_mtu\n", res);
1878 1879 1880
		goto err_free;
	}

1881
	/* Save slave's original ("permanent") mac address for modes
1882 1883 1884
	 * that need it, and for restoring it upon release, and then
	 * set it to the master's address
	 */
1885 1886
	bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr,
			  slave_dev->addr_len);
L
Linus Torvalds 已提交
1887

1888
	if (!bond->params.fail_over_mac ||
1889
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1890
		/* Set slave to master's mac address.  The application already
1891 1892
		 * set the master's mac address to that of the first slave
		 */
1893 1894
		memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
		ss.ss_family = slave_dev->type;
1895 1896
		res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
					  extack);
1897
		if (res) {
1898
			slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
1899
			goto err_restore_mtu;
1900
		}
1901
	}
L
Linus Torvalds 已提交
1902

1903 1904 1905
	/* set slave flag before open to prevent IPv6 addrconf */
	slave_dev->flags |= IFF_SLAVE;

1906
	/* open the slave since the application closed it */
1907
	res = dev_open(slave_dev, extack);
1908
	if (res) {
1909
		slave_err(bond_dev, slave_dev, "Opening slave failed\n");
1910
		goto err_restore_mac;
L
Linus Torvalds 已提交
1911 1912
	}

1913
	slave_dev->priv_flags |= IFF_BONDING;
1914 1915
	/* initialize slave stats */
	dev_get_stats(new_slave->dev, &new_slave->slave_stats);
L
Linus Torvalds 已提交
1916

1917
	if (bond_is_lb(bond)) {
L
Linus Torvalds 已提交
1918 1919 1920 1921
		/* bond_alb_init_slave() must be called before all other stages since
		 * it might fail and we do not want to have to undo everything
		 */
		res = bond_alb_init_slave(bond, new_slave);
S
Stephen Hemminger 已提交
1922
		if (res)
1923
			goto err_close;
L
Linus Torvalds 已提交
1924 1925
	}

1926 1927
	res = vlan_vids_add_by_dev(slave_dev, bond_dev);
	if (res) {
1928
		slave_err(bond_dev, slave_dev, "Couldn't add bond vlan ids\n");
1929
		goto err_close;
1930
	}
L
Linus Torvalds 已提交
1931

1932
	prev_slave = bond_last_slave(bond);
L
Linus Torvalds 已提交
1933 1934 1935 1936

	new_slave->delay = 0;
	new_slave->link_failure_count = 0;

1937 1938
	if (bond_update_speed_duplex(new_slave) &&
	    bond_needs_speed_duplex(bond))
1939
		new_slave->link = BOND_LINK_DOWN;
1940

1941
	new_slave->last_rx = jiffies -
1942
		(msecs_to_jiffies(bond->params.arp_interval) + 1);
1943
	for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
1944
		new_slave->target_last_arp_rx[i] = new_slave->last_rx;
1945

L
Linus Torvalds 已提交
1946 1947 1948 1949
	if (bond->params.miimon && !bond->params.use_carrier) {
		link_reporting = bond_check_dev_link(bond, slave_dev, 1);

		if ((link_reporting == -1) && !bond->params.arp_interval) {
1950
			/* miimon is set but a bonded network driver
L
Linus Torvalds 已提交
1951 1952 1953 1954 1955 1956 1957
			 * does not support ETHTOOL/MII and
			 * arp_interval is not set.  Note: if
			 * use_carrier is enabled, we will never go
			 * here (because netif_carrier is always
			 * supported); thus, we don't need to change
			 * the messages for netif_carrier.
			 */
1958
			slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n");
L
Linus Torvalds 已提交
1959 1960
		} else if (link_reporting == -1) {
			/* unable get link status using mii/ethtool */
1961
			slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n");
L
Linus Torvalds 已提交
1962 1963 1964 1965
		}
	}

	/* check for initial state */
1966
	new_slave->link = BOND_LINK_NOCHANGE;
1967 1968 1969
	if (bond->params.miimon) {
		if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
			if (bond->params.updelay) {
1970
				bond_set_slave_link_state(new_slave,
1971 1972
							  BOND_LINK_BACK,
							  BOND_SLAVE_NOTIFY_NOW);
1973 1974
				new_slave->delay = bond->params.updelay;
			} else {
1975
				bond_set_slave_link_state(new_slave,
1976 1977
							  BOND_LINK_UP,
							  BOND_SLAVE_NOTIFY_NOW);
1978
			}
L
Linus Torvalds 已提交
1979
		} else {
1980 1981
			bond_set_slave_link_state(new_slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_NOW);
L
Linus Torvalds 已提交
1982
		}
1983
	} else if (bond->params.arp_interval) {
1984 1985
		bond_set_slave_link_state(new_slave,
					  (netif_carrier_ok(slave_dev) ?
1986 1987
					  BOND_LINK_UP : BOND_LINK_DOWN),
					  BOND_SLAVE_NOTIFY_NOW);
L
Linus Torvalds 已提交
1988
	} else {
1989 1990
		bond_set_slave_link_state(new_slave, BOND_LINK_UP,
					  BOND_SLAVE_NOTIFY_NOW);
L
Linus Torvalds 已提交
1991 1992
	}

1993
	if (new_slave->link != BOND_LINK_DOWN)
1994
		new_slave->last_link_up = jiffies;
1995 1996 1997
	slave_dbg(bond_dev, slave_dev, "Initial state of slave is BOND_LINK_%s\n",
		  new_slave->link == BOND_LINK_DOWN ? "DOWN" :
		  (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
1998

1999
	if (bond_uses_primary(bond) && bond->params.primary[0]) {
L
Linus Torvalds 已提交
2000
		/* if there is a primary slave, remember it */
2001
		if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
2002
			rcu_assign_pointer(bond->primary_slave, new_slave);
2003 2004
			bond->force_primary = true;
		}
L
Linus Torvalds 已提交
2005 2006
	}

2007
	switch (BOND_MODE(bond)) {
L
Linus Torvalds 已提交
2008
	case BOND_MODE_ACTIVEBACKUP:
2009 2010
		bond_set_slave_inactive_flags(new_slave,
					      BOND_SLAVE_NOTIFY_NOW);
L
Linus Torvalds 已提交
2011 2012 2013 2014 2015 2016
		break;
	case BOND_MODE_8023AD:
		/* in 802.3ad mode, the internal mechanism
		 * will activate the slaves in the selected
		 * aggregator
		 */
2017
		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
L
Linus Torvalds 已提交
2018
		/* if this is the first slave */
2019
		if (!prev_slave) {
2020
			SLAVE_AD_INFO(new_slave)->id = 1;
L
Linus Torvalds 已提交
2021 2022 2023
			/* Initialize AD with the number of times that the AD timer is called in 1 second
			 * can be called only after the mac address of the bond is set
			 */
2024
			bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
L
Linus Torvalds 已提交
2025
		} else {
2026 2027
			SLAVE_AD_INFO(new_slave)->id =
				SLAVE_AD_INFO(prev_slave)->id + 1;
L
Linus Torvalds 已提交
2028 2029 2030 2031 2032 2033
		}

		bond_3ad_bind_slave(new_slave);
		break;
	case BOND_MODE_TLB:
	case BOND_MODE_ALB:
J
Jiri Pirko 已提交
2034
		bond_set_active_slave(new_slave);
2035
		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
L
Linus Torvalds 已提交
2036 2037
		break;
	default:
2038
		slave_dbg(bond_dev, slave_dev, "This slave is always active in trunk mode\n");
L
Linus Torvalds 已提交
2039 2040

		/* always active in trunk mode */
J
Jiri Pirko 已提交
2041
		bond_set_active_slave(new_slave);
L
Linus Torvalds 已提交
2042 2043 2044 2045 2046

		/* In trunking mode there is little meaning to curr_active_slave
		 * anyway (it holds no special properties of the bond device),
		 * so we can change it without calling change_active_interface()
		 */
2047 2048
		if (!rcu_access_pointer(bond->curr_active_slave) &&
		    new_slave->link == BOND_LINK_UP)
2049
			rcu_assign_pointer(bond->curr_active_slave, new_slave);
S
Stephen Hemminger 已提交
2050

L
Linus Torvalds 已提交
2051 2052 2053
		break;
	} /* switch(bond_mode) */

2054
#ifdef CONFIG_NET_POLL_CONTROLLER
2055
	if (bond->dev->npinfo) {
2056
		if (slave_enable_netpoll(new_slave)) {
2057
			slave_info(bond_dev, slave_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
2058
			res = -EBUSY;
2059
			goto err_detach;
2060
		}
2061 2062
	}
#endif
2063

2064 2065 2066
	if (!(bond_dev->features & NETIF_F_LRO))
		dev_disable_lro(slave_dev);

J
Jiri Pirko 已提交
2067 2068 2069
	res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
					 new_slave);
	if (res) {
2070
		slave_dbg(bond_dev, slave_dev, "Error %d calling netdev_rx_handler_register\n", res);
2071
		goto err_detach;
J
Jiri Pirko 已提交
2072 2073
	}

2074
	res = bond_master_upper_dev_link(bond, new_slave, extack);
2075
	if (res) {
2076
		slave_dbg(bond_dev, slave_dev, "Error %d calling bond_master_upper_dev_link\n", res);
2077 2078 2079
		goto err_unregister;
	}

2080 2081
	bond_lower_state_changed(new_slave);

2082 2083
	res = bond_sysfs_slave_add(new_slave);
	if (res) {
2084
		slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res);
2085 2086 2087
		goto err_upper_unlink;
	}

2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101
	/* If the mode uses primary, then the following is handled by
	 * bond_change_active_slave().
	 */
	if (!bond_uses_primary(bond)) {
		/* set promiscuity level to new slave */
		if (bond_dev->flags & IFF_PROMISC) {
			res = dev_set_promiscuity(slave_dev, 1);
			if (res)
				goto err_sysfs_del;
		}

		/* set allmulti level to new slave */
		if (bond_dev->flags & IFF_ALLMULTI) {
			res = dev_set_allmulti(slave_dev, 1);
2102 2103 2104
			if (res) {
				if (bond_dev->flags & IFF_PROMISC)
					dev_set_promiscuity(slave_dev, -1);
2105
				goto err_sysfs_del;
2106
			}
2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121
		}

		netif_addr_lock_bh(bond_dev);
		dev_mc_sync_multiple(slave_dev, bond_dev);
		dev_uc_sync_multiple(slave_dev, bond_dev);
		netif_addr_unlock_bh(bond_dev);

		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			/* add lacpdu mc addr to mc list */
			u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;

			dev_mc_add(slave_dev, lacpdu_multicast);
		}
	}

2122 2123 2124 2125
	bond->slave_cnt++;
	bond_compute_features(bond);
	bond_set_carrier(bond);

2126
	if (bond_uses_primary(bond)) {
2127
		block_netpoll_tx();
2128
		bond_select_active_slave(bond);
2129
		unblock_netpoll_tx();
2130
	}
2131

2132
	if (bond_mode_can_use_xmit_hash(bond))
2133 2134
		bond_update_slave_arr(bond, NULL);

2135

2136 2137 2138
	slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
		   bond_is_active_slave(new_slave) ? "an active" : "a backup",
		   new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
L
Linus Torvalds 已提交
2139 2140

	/* enslave is successful */
2141
	bond_queue_slave_event(new_slave);
L
Linus Torvalds 已提交
2142 2143 2144
	return 0;

/* Undo stages on error */
2145 2146 2147
err_sysfs_del:
	bond_sysfs_slave_del(new_slave);

2148
err_upper_unlink:
2149
	bond_upper_dev_unlink(bond, new_slave);
2150

2151 2152 2153
err_unregister:
	netdev_rx_handler_unregister(slave_dev);

2154
err_detach:
2155
	vlan_vids_del_by_dev(slave_dev, bond_dev);
2156 2157
	if (rcu_access_pointer(bond->primary_slave) == new_slave)
		RCU_INIT_POINTER(bond->primary_slave, NULL);
2158
	if (rcu_access_pointer(bond->curr_active_slave) == new_slave) {
2159
		block_netpoll_tx();
2160
		bond_change_active_slave(bond, NULL);
2161
		bond_select_active_slave(bond);
2162
		unblock_netpoll_tx();
2163
	}
2164 2165
	/* either primary_slave or curr_active_slave might've changed */
	synchronize_rcu();
2166
	slave_disable_netpoll(new_slave);
2167

L
Linus Torvalds 已提交
2168
err_close:
2169 2170
	if (!netif_is_bond_master(slave_dev))
		slave_dev->priv_flags &= ~IFF_BONDING;
L
Linus Torvalds 已提交
2171 2172 2173
	dev_close(slave_dev);

err_restore_mac:
2174
	slave_dev->flags &= ~IFF_SLAVE;
2175
	if (!bond->params.fail_over_mac ||
2176
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2177 2178 2179 2180
		/* XXX TODO - fom follow mode needs to change master's
		 * MAC if this slave's MAC is in use by the bond, or at
		 * least print a warning.
		 */
2181 2182 2183
		bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
				  new_slave->dev->addr_len);
		ss.ss_family = slave_dev->type;
2184
		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
2185
	}
L
Linus Torvalds 已提交
2186

2187 2188 2189
err_restore_mtu:
	dev_set_mtu(slave_dev, new_slave->original_mtu);

L
Linus Torvalds 已提交
2190
err_free:
2191
	kobject_put(&new_slave->kobj);
L
Linus Torvalds 已提交
2192 2193

err_undo_flags:
2194
	/* Enslave of first slave has failed and we need to fix master's mac */
2195 2196 2197 2198 2199
	if (!bond_has_slaves(bond)) {
		if (ether_addr_equal_64bits(bond_dev->dev_addr,
					    slave_dev->dev_addr))
			eth_hw_addr_random(bond_dev);
		if (bond_dev->type != ARPHRD_ETHER) {
2200
			dev_close(bond_dev);
2201 2202 2203 2204 2205
			ether_setup(bond_dev);
			bond_dev->flags |= IFF_MASTER;
			bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		}
	}
S
Stephen Hemminger 已提交
2206

L
Linus Torvalds 已提交
2207 2208 2209
	return res;
}

2210
/* Try to release the slave device <slave> from the bond device <master>
L
Linus Torvalds 已提交
2211
 * It is legal to access curr_active_slave without a lock because all the function
2212
 * is RTNL-locked. If "all" is true it means that the function is being called
2213
 * while destroying a bond interface and all slaves are being released.
L
Linus Torvalds 已提交
2214 2215 2216 2217 2218 2219 2220
 *
 * The rules for slave state should be:
 *   for Active/Backup:
 *     Active stays on all backups go down
 *   for Bonded connections:
 *     The first up interface should be left on and all others downed.
 */
2221 2222
static int __bond_release_one(struct net_device *bond_dev,
			      struct net_device *slave_dev,
2223
			      bool all, bool unregister)
L
Linus Torvalds 已提交
2224
{
2225
	struct bonding *bond = netdev_priv(bond_dev);
L
Linus Torvalds 已提交
2226
	struct slave *slave, *oldcurrent;
2227
	struct sockaddr_storage ss;
2228
	int old_flags = bond_dev->flags;
2229
	netdev_features_t old_features = bond_dev->features;
L
Linus Torvalds 已提交
2230 2231 2232

	/* slave is not a slave or master is not master of this slave */
	if (!(slave_dev->flags & IFF_SLAVE) ||
2233
	    !netdev_has_upper_dev(slave_dev, bond_dev)) {
2234
		slave_dbg(bond_dev, slave_dev, "cannot release slave\n");
L
Linus Torvalds 已提交
2235 2236 2237
		return -EINVAL;
	}

2238
	block_netpoll_tx();
L
Linus Torvalds 已提交
2239 2240 2241 2242

	slave = bond_get_slave_by_dev(bond, slave_dev);
	if (!slave) {
		/* not a slave of this bond */
2243
		slave_info(bond_dev, slave_dev, "interface not enslaved\n");
2244
		unblock_netpoll_tx();
L
Linus Torvalds 已提交
2245 2246 2247
		return -EINVAL;
	}

2248 2249
	bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);

2250 2251
	bond_sysfs_slave_del(slave);

2252 2253 2254
	/* recompute stats just before removing the slave */
	bond_get_stats(bond->dev, &bond->bond_stats);

2255
	bond_upper_dev_unlink(bond, slave);
J
Jiri Pirko 已提交
2256 2257 2258 2259 2260
	/* unregister rx_handler early so bond_handle_frame wouldn't be called
	 * for this slave anymore.
	 */
	netdev_rx_handler_unregister(slave_dev);

2261
	if (BOND_MODE(bond) == BOND_MODE_8023AD)
L
Linus Torvalds 已提交
2262 2263
		bond_3ad_unbind_slave(slave);

2264
	if (bond_mode_can_use_xmit_hash(bond))
2265 2266
		bond_update_slave_arr(bond, slave);

2267 2268
	slave_info(bond_dev, slave_dev, "Releasing %s interface\n",
		    bond_is_active_slave(slave) ? "active" : "backup");
L
Linus Torvalds 已提交
2269

2270
	oldcurrent = rcu_access_pointer(bond->curr_active_slave);
L
Linus Torvalds 已提交
2271

2272
	RCU_INIT_POINTER(bond->current_arp_slave, NULL);
L
Linus Torvalds 已提交
2273

2274
	if (!all && (!bond->params.fail_over_mac ||
2275
		     BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
2276
		if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
2277
		    bond_has_slaves(bond))
2278 2279
			slave_warn(bond_dev, slave_dev, "the permanent HWaddr of slave - %pM - is still in use by bond - set the HWaddr of slave to a different address to avoid conflicts\n",
				   slave->perm_hwaddr);
2280 2281
	}

2282 2283
	if (rtnl_dereference(bond->primary_slave) == slave)
		RCU_INIT_POINTER(bond->primary_slave, NULL);
L
Linus Torvalds 已提交
2284

2285
	if (oldcurrent == slave)
L
Linus Torvalds 已提交
2286 2287
		bond_change_active_slave(bond, NULL);

2288
	if (bond_is_lb(bond)) {
L
Linus Torvalds 已提交
2289 2290 2291 2292 2293 2294 2295 2296
		/* Must be called only after the slave has been
		 * detached from the list and the curr_active_slave
		 * has been cleared (if our_slave == old_current),
		 * but before a new active slave is selected.
		 */
		bond_alb_deinit_slave(bond, slave);
	}

2297
	if (all) {
2298
		RCU_INIT_POINTER(bond->curr_active_slave, NULL);
2299
	} else if (oldcurrent == slave) {
2300
		/* Note that we hold RTNL over this sequence, so there
2301 2302 2303
		 * is no concern that another slave add/remove event
		 * will interfere.
		 */
L
Linus Torvalds 已提交
2304
		bond_select_active_slave(bond);
2305 2306
	}

2307
	if (!bond_has_slaves(bond)) {
2308
		bond_set_carrier(bond);
2309
		eth_hw_addr_random(bond_dev);
L
Linus Torvalds 已提交
2310 2311
	}

2312
	unblock_netpoll_tx();
2313
	synchronize_rcu();
2314
	bond->slave_cnt--;
L
Linus Torvalds 已提交
2315

2316
	if (!bond_has_slaves(bond)) {
2317
		call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
2318 2319
		call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
	}
2320

2321 2322 2323
	bond_compute_features(bond);
	if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
	    (old_features & NETIF_F_VLAN_CHALLENGED))
2324
		slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n");
2325

2326
	vlan_vids_del_by_dev(slave_dev, bond_dev);
L
Linus Torvalds 已提交
2327

2328
	/* If the mode uses primary, then this case was handled above by
2329
	 * bond_change_active_slave(..., NULL)
L
Linus Torvalds 已提交
2330
	 */
2331
	if (!bond_uses_primary(bond)) {
2332 2333 2334 2335 2336 2337 2338 2339
		/* unset promiscuity level from slave
		 * NOTE: The NETDEV_CHANGEADDR call above may change the value
		 * of the IFF_PROMISC flag in the bond_dev, but we need the
		 * value of that flag before that change, as that was the value
		 * when this slave was attached, so we cache at the start of the
		 * function and use it here. Same goes for ALLMULTI below
		 */
		if (old_flags & IFF_PROMISC)
L
Linus Torvalds 已提交
2340 2341 2342
			dev_set_promiscuity(slave_dev, -1);

		/* unset allmulti level from slave */
2343
		if (old_flags & IFF_ALLMULTI)
L
Linus Torvalds 已提交
2344 2345
			dev_set_allmulti(slave_dev, -1);

2346
		bond_hw_addr_flush(bond_dev, slave_dev);
L
Linus Torvalds 已提交
2347 2348
	}

2349
	slave_disable_netpoll(slave);
2350

L
Linus Torvalds 已提交
2351 2352 2353
	/* close slave before restoring its mac address */
	dev_close(slave_dev);

2354
	if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
2355
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2356
		/* restore original ("permanent") mac address */
2357 2358 2359
		bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
				  slave->dev->addr_len);
		ss.ss_family = slave_dev->type;
2360
		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
2361
	}
L
Linus Torvalds 已提交
2362

2363 2364 2365 2366
	if (unregister)
		__dev_set_mtu(slave_dev, slave->original_mtu);
	else
		dev_set_mtu(slave_dev, slave->original_mtu);
2367

2368 2369
	if (!netif_is_bond_master(slave_dev))
		slave_dev->priv_flags &= ~IFF_BONDING;
L
Linus Torvalds 已提交
2370

2371
	kobject_put(&slave->kobj);
L
Linus Torvalds 已提交
2372

2373
	return 0;
L
Linus Torvalds 已提交
2374 2375
}

2376 2377 2378
/* A wrapper used because of ndo_del_link */
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
{
2379
	return __bond_release_one(bond_dev, slave_dev, false, false);
2380 2381
}

2382 2383 2384
/* First release a slave and then destroy the bond if no more slaves are left.
 * Must be under rtnl_lock when this function is called.
 */
2385 2386
static int bond_release_and_destroy(struct net_device *bond_dev,
				    struct net_device *slave_dev)
2387
{
2388
	struct bonding *bond = netdev_priv(bond_dev);
2389 2390
	int ret;

2391
	ret = __bond_release_one(bond_dev, slave_dev, false, true);
2392 2393
	if (ret == 0 && !bond_has_slaves(bond) &&
	    bond_dev->reg_state != NETREG_UNREGISTERING) {
2394
		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
2395
		netdev_info(bond_dev, "Destroying bond\n");
2396
		bond_remove_proc_entry(bond);
S
Stephen Hemminger 已提交
2397
		unregister_netdevice(bond_dev);
2398 2399 2400 2401
	}
	return ret;
}

2402
/* ioctl helper: report the bond's mode/miimon/slave-count via @info. */
static void bond_info_query(struct net_device *bond_dev, struct ifbond *info)
{
	struct bonding *bond = netdev_priv(bond_dev);

	bond_fill_ifbond(bond, info);
}

static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
{
2411
	struct bonding *bond = netdev_priv(bond_dev);
2412
	struct list_head *iter;
2413
	int i = 0, res = -ENODEV;
L
Linus Torvalds 已提交
2414 2415
	struct slave *slave;

2416
	bond_for_each_slave(bond, slave, iter) {
2417
		if (i++ == (int)info->slave_id) {
2418
			res = 0;
2419
			bond_fill_ifslave(slave, info);
L
Linus Torvalds 已提交
2420 2421 2422 2423
			break;
		}
	}

2424
	return res;
L
Linus Torvalds 已提交
2425 2426 2427 2428
}

/*-------------------------------- Monitoring -------------------------------*/

2429
/* called with rcu_read_lock() */
J
Jay Vosburgh 已提交
2430 2431
static int bond_miimon_inspect(struct bonding *bond)
{
2432
	int link_state, commit = 0;
2433
	struct list_head *iter;
J
Jay Vosburgh 已提交
2434
	struct slave *slave;
2435 2436
	bool ignore_updelay;

2437
	ignore_updelay = !rcu_dereference(bond->curr_active_slave);
L
Linus Torvalds 已提交
2438

2439
	bond_for_each_slave_rcu(bond, slave, iter) {
2440
		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
L
Linus Torvalds 已提交
2441

J
Jay Vosburgh 已提交
2442
		link_state = bond_check_dev_link(bond, slave->dev, 0);
L
Linus Torvalds 已提交
2443 2444

		switch (slave->link) {
J
Jay Vosburgh 已提交
2445 2446 2447
		case BOND_LINK_UP:
			if (link_state)
				continue;
L
Linus Torvalds 已提交
2448

2449
			bond_propose_link_state(slave, BOND_LINK_FAIL);
2450
			commit++;
J
Jay Vosburgh 已提交
2451 2452
			slave->delay = bond->params.downdelay;
			if (slave->delay) {
2453 2454 2455 2456 2457 2458
				slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n",
					   (BOND_MODE(bond) ==
					    BOND_MODE_ACTIVEBACKUP) ?
					    (bond_is_active_slave(slave) ?
					     "active " : "backup ") : "",
					   bond->params.downdelay * bond->params.miimon);
L
Linus Torvalds 已提交
2459
			}
2460
			fallthrough;
J
Jay Vosburgh 已提交
2461 2462
		case BOND_LINK_FAIL:
			if (link_state) {
2463
				/* recovered before downdelay expired */
2464
				bond_propose_link_state(slave, BOND_LINK_UP);
2465
				slave->last_link_up = jiffies;
2466 2467 2468
				slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
					   (bond->params.downdelay - slave->delay) *
					   bond->params.miimon);
2469
				commit++;
J
Jay Vosburgh 已提交
2470
				continue;
L
Linus Torvalds 已提交
2471
			}
J
Jay Vosburgh 已提交
2472 2473

			if (slave->delay <= 0) {
2474
				bond_propose_link_state(slave, BOND_LINK_DOWN);
J
Jay Vosburgh 已提交
2475 2476
				commit++;
				continue;
L
Linus Torvalds 已提交
2477 2478
			}

J
Jay Vosburgh 已提交
2479 2480 2481 2482 2483 2484 2485
			slave->delay--;
			break;

		case BOND_LINK_DOWN:
			if (!link_state)
				continue;

2486
			bond_propose_link_state(slave, BOND_LINK_BACK);
2487
			commit++;
J
Jay Vosburgh 已提交
2488 2489 2490
			slave->delay = bond->params.updelay;

			if (slave->delay) {
2491 2492 2493 2494
				slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n",
					   ignore_updelay ? 0 :
					   bond->params.updelay *
					   bond->params.miimon);
J
Jay Vosburgh 已提交
2495
			}
2496
			fallthrough;
J
Jay Vosburgh 已提交
2497 2498
		case BOND_LINK_BACK:
			if (!link_state) {
2499
				bond_propose_link_state(slave, BOND_LINK_DOWN);
2500 2501 2502
				slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
					   (bond->params.updelay - slave->delay) *
					   bond->params.miimon);
2503
				commit++;
J
Jay Vosburgh 已提交
2504 2505 2506
				continue;
			}

2507 2508 2509
			if (ignore_updelay)
				slave->delay = 0;

J
Jay Vosburgh 已提交
2510
			if (slave->delay <= 0) {
2511
				bond_propose_link_state(slave, BOND_LINK_UP);
J
Jay Vosburgh 已提交
2512
				commit++;
2513
				ignore_updelay = false;
J
Jay Vosburgh 已提交
2514
				continue;
L
Linus Torvalds 已提交
2515
			}
J
Jay Vosburgh 已提交
2516 2517

			slave->delay--;
L
Linus Torvalds 已提交
2518
			break;
J
Jay Vosburgh 已提交
2519 2520
		}
	}
L
Linus Torvalds 已提交
2521

J
Jay Vosburgh 已提交
2522 2523
	return commit;
}
L
Linus Torvalds 已提交
2524

2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542
/* Propagate a miimon-detected link change to the mode-specific handler. */
static void bond_miimon_link_change(struct bonding *bond,
				    struct slave *slave,
				    char link)
{
	int mode = BOND_MODE(bond);

	if (mode == BOND_MODE_8023AD)
		bond_3ad_handle_link_change(slave, link);
	else if (mode == BOND_MODE_TLB || mode == BOND_MODE_ALB)
		bond_alb_handle_link_change(bond, slave, link);
	else if (mode == BOND_MODE_XOR)
		bond_update_slave_arr(bond, NULL);
}

J
Jay Vosburgh 已提交
2543 2544
/* Commit the link-state changes proposed by bond_miimon_inspect().
 *
 * Walks all slaves and acts on each slave's link_new_state: notifies the
 * mode-specific handlers, updates active/backup flags, and re-selects the
 * active slave when the change affects it.  Called with RTNL held (uses
 * rtnl_dereference and bond_for_each_slave).
 */
static void bond_miimon_commit(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave, *primary;

	bond_for_each_slave(bond, slave, iter) {
		switch (slave->link_new_state) {
		case BOND_LINK_NOCHANGE:
			/* For 802.3ad mode, check current slave speed and
			 * duplex again in case its port was disabled after
			 * invalid speed/duplex reporting but recovered before
			 * link monitoring could make a decision on the actual
			 * link status
			 */
			if (BOND_MODE(bond) == BOND_MODE_8023AD &&
			    slave->link == BOND_LINK_UP)
				bond_3ad_adapter_speed_duplex_changed(slave);
			continue;

		case BOND_LINK_UP:
			/* If the mode requires a valid speed/duplex and we
			 * still can't read one, keep the slave down.
			 */
			if (bond_update_speed_duplex(slave) &&
			    bond_needs_speed_duplex(bond)) {
				slave->link = BOND_LINK_DOWN;
				if (net_ratelimit())
					slave_warn(bond->dev, slave->dev,
						   "failed to get link speed/duplex\n");
				continue;
			}
			bond_set_slave_link_state(slave, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);
			slave->last_link_up = jiffies;

			primary = rtnl_dereference(bond->primary_slave);
			if (BOND_MODE(bond) == BOND_MODE_8023AD) {
				/* prevent it from being the active one */
				bond_set_backup_slave(slave);
			} else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
				/* make it immediately active */
				bond_set_active_slave(slave);
			}

			slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
				   slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
				   slave->duplex ? "full" : "half");

			bond_miimon_link_change(bond, slave, BOND_LINK_UP);

			/* Re-select the active slave when none exists yet or
			 * when the primary just came back.
			 */
			if (!bond->curr_active_slave || slave == primary)
				goto do_failover;

			continue;

		case BOND_LINK_DOWN:
			if (slave->link_failure_count < UINT_MAX)
				slave->link_failure_count++;

			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_NOW);

			if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
			    BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_set_slave_inactive_flags(slave,
							      BOND_SLAVE_NOTIFY_NOW);

			slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");

			bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);

			if (slave == rcu_access_pointer(bond->curr_active_slave))
				goto do_failover;

			continue;

		default:
			/* Unknown proposed state: log it and reset to
			 * "no change" so the next pass starts clean.
			 */
			slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
				  slave->link_new_state);
			bond_propose_link_state(slave, BOND_LINK_NOCHANGE);

			continue;
		}

do_failover:
		block_netpoll_tx();
		bond_select_active_slave(bond);
		unblock_netpoll_tx();
	}

	bond_set_carrier(bond);
}

2633
/* bond_mii_monitor
 *
 * Really a wrapper that splits the mii monitor into two phases: an
 * inspection, then (if inspection indicates something needs to be done)
 * an acquisition of appropriate locks followed by a commit phase to
 * implement whatever link state changes are indicated.
 */
static void bond_mii_monitor(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mii_work.work);
	bool should_notify_peers = false;
	bool commit;
	unsigned long delay;
	struct slave *slave;
	struct list_head *iter;

	delay = msecs_to_jiffies(bond->params.miimon);

	if (!bond_has_slaves(bond))
		goto re_arm;

	/* Inspection phase runs under RCU only. */
	rcu_read_lock();
	should_notify_peers = bond_should_notify_peers(bond);
	commit = !!bond_miimon_inspect(bond);
	if (bond->send_peer_notif) {
		rcu_read_unlock();
		/* Best-effort decrement; skipped if RTNL is contended. */
		if (rtnl_trylock()) {
			bond->send_peer_notif--;
			rtnl_unlock();
		}
	} else {
		rcu_read_unlock();
	}

	if (commit) {
		/* Race avoidance with bond_close cancel of workqueue */
		if (!rtnl_trylock()) {
			delay = 1;
			should_notify_peers = false;
			goto re_arm;
		}

		/* Apply the proposed states before committing. */
		bond_for_each_slave(bond, slave, iter) {
			bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
		}
		bond_miimon_commit(bond);

		rtnl_unlock();	/* might sleep, hold no other locks */
	}

re_arm:
	if (bond->params.miimon)
		queue_delayed_work(bond->wq, &bond->mii_work, delay);

	if (should_notify_peers) {
		if (!rtnl_trylock())
			return;
		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
		rtnl_unlock();
	}
}
J
Jay Vosburgh 已提交
2695

2696 2697
static int bond_upper_dev_walk(struct net_device *upper,
			       struct netdev_nested_priv *priv)
2698
{
2699
	__be32 ip = *(__be32 *)priv->data;
2700 2701 2702 2703

	return ip == bond_confirm_addr(upper, 0, ip);
}

2704
/* Return true if @ip is owned by the bond device itself or by any
 * device stacked on top of it (e.g. a VLAN on the bond).
 */
static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
{
	struct netdev_nested_priv priv = {
		.data = (void *)&ip,
	};
	bool found;

	/* Fast path: the bond device may own the address directly. */
	if (bond_confirm_addr(bond->dev, 0, ip) == ip)
		return true;

	/* Otherwise scan every upper device under RCU. */
	rcu_read_lock();
	found = netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk,
					      &priv) != 0;
	rcu_read_unlock();

	return found;
}

2722
/* We go to the (large) trouble of VLAN tagging ARP frames because
 * switches in VLAN mode (especially if ports are configured as
 * "native" to a VLAN) might not pass non-tagged frames.
 *
 * @tags is a VLAN_N_VID-terminated array collected by
 * bond_verify_device_path(); entry 0 (the outer tag) is applied via
 * hwaccel, the rest are inserted into the frame inner-first.
 */
static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
			  __be32 src_ip, struct bond_vlan_tag *tags)
{
	struct sk_buff *skb;
	struct bond_vlan_tag *outer_tag = tags;
	struct net_device *slave_dev = slave->dev;
	struct net_device *bond_dev = slave->bond->dev;

	slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
		  arp_op, &dest_ip, &src_ip);

	skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
			 NULL, slave_dev->dev_addr, NULL);

	if (!skb) {
		net_err_ratelimited("ARP packet allocation failed\n");
		return;
	}

	/* No tags, or a lone terminator entry: send untagged. */
	if (!tags || tags->vlan_proto == VLAN_N_VID)
		goto xmit;

	/* Skip the outer tag; it is applied last, below. */
	tags++;

	/* Go through all the tags backwards and add them to the packet */
	while (tags->vlan_proto != VLAN_N_VID) {
		if (!tags->vlan_id) {
			tags++;
			continue;
		}

		slave_dbg(bond_dev, slave_dev, "inner tag: proto %X vid %X\n",
			  ntohs(outer_tag->vlan_proto), tags->vlan_id);
		/* vlan_insert_tag_set_proto consumes skb on failure. */
		skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
						tags->vlan_id);
		if (!skb) {
			net_err_ratelimited("failed to insert inner VLAN tag\n");
			return;
		}

		tags++;
	}
	/* Set the outer tag */
	if (outer_tag->vlan_id) {
		slave_dbg(bond_dev, slave_dev, "outer tag: proto %X vid %X\n",
			  ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
		__vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
				       outer_tag->vlan_id);
	}

xmit:
	arp_xmit(skb);
}

2780 2781 2782 2783 2784 2785
/* Validate the device path between the @start_dev and the @end_dev.
 * The path is valid if the @end_dev is reachable through device
 * stacking.
 * When the path is validated, collect any vlan information in the
 * path.
 *
 * Returns a kcalloc'd, VLAN_N_VID-terminated tag array (caller frees),
 * ERR_PTR(-ENOMEM) on allocation failure, or NULL if @end_dev is not
 * reachable from @start_dev.  Recurses one level per upper device.
 */
struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
					      struct net_device *end_dev,
					      int level)
{
	struct bond_vlan_tag *tags;
	struct net_device *upper;
	struct list_head  *iter;

	if (start_dev == end_dev) {
		/* Base case: allocate one slot per recursion level plus
		 * the terminator entry.
		 */
		tags = kcalloc(level + 1, sizeof(*tags), GFP_ATOMIC);
		if (!tags)
			return ERR_PTR(-ENOMEM);
		tags[level].vlan_proto = VLAN_N_VID;
		return tags;
	}

	netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
		tags = bond_verify_device_path(upper, end_dev, level + 1);
		if (IS_ERR_OR_NULL(tags)) {
			if (IS_ERR(tags))
				return tags;
			continue;
		}
		/* Record this hop's VLAN info on the way back up. */
		if (is_vlan_dev(upper)) {
			tags[level].vlan_proto = vlan_dev_vlan_proto(upper);
			tags[level].vlan_id = vlan_dev_vlan_id(upper);
		}

		return tags;
	}

	return NULL;
}
J
Jay Vosburgh 已提交
2819

L
Linus Torvalds 已提交
2820 2821
/* Send an ARP request to every configured arp_ip_target via @slave,
 * routing each target to discover the device path (and hence the VLAN
 * tags) the probe must carry.
 */
static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
{
	struct rtable *rt;
	struct bond_vlan_tag *tags;
	__be32 *targets = bond->params.arp_targets, addr;
	int i;

	for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
		slave_dbg(bond->dev, slave->dev, "%s: target %pI4\n",
			  __func__, &targets[i]);
		tags = NULL;

		/* Find out through which dev should the packet go */
		rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
				     RTO_ONLINK, 0);
		if (IS_ERR(rt)) {
			/* there's no route to target - try to send arp
			 * probe to generate any traffic (arp_validate=0)
			 */
			if (bond->params.arp_validate)
				net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
						     bond->dev->name,
						     &targets[i]);
			/* src_ip 0, no tags: best-effort probe. */
			bond_arp_send(slave, ARPOP_REQUEST, targets[i],
				      0, tags);
			continue;
		}

		/* bond device itself */
		if (rt->dst.dev == bond->dev)
			goto found;

		rcu_read_lock();
		tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0);
		rcu_read_unlock();

		if (!IS_ERR_OR_NULL(tags))
			goto found;

		/* Not our device - skip */
		slave_dbg(bond->dev, slave->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
			   &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");

		ip_rt_put(rt);
		continue;

found:
		/* Use the egress device's address as the ARP source. */
		addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
		ip_rt_put(rt);
		bond_arp_send(slave, ARPOP_REQUEST, targets[i], addr, tags);
		kfree(tags);
	}
}

2874
/* Validate a received ARP (sip/tip already extracted by the caller):
 * accept it only if the target IP belongs to this bond and the source
 * IP is a configured arp_ip_target, then refresh the slave's receive
 * timestamps.
 */
static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
{
	int i;

	/* Reject zero source addresses and ARPs not aimed at us. */
	if (!sip || !bond_has_this_ip(bond, tip)) {
		slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 tip %pI4 not found\n",
			   __func__, &sip, &tip);
		return;
	}

	i = bond_get_targets_ip(bond->params.arp_targets, sip);
	if (i == -1) {
		slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 not found in targets\n",
			   __func__, &sip);
		return;
	}
	/* Valid probe reply/request: record overall and per-target rx. */
	slave->last_rx = jiffies;
	slave->target_last_arp_rx[i] = jiffies;
}

2894 2895
/* Receive handler for ARP monitoring.  Updates slave->last_rx (and the
 * per-target timestamps via bond_validate_arp) according to the
 * arp_validate policy.  Always returns RX_HANDLER_ANOTHER so normal
 * packet processing continues.  Runs in the RX path under RCU.
 */
int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
		 struct slave *slave)
{
	struct arphdr *arp = (struct arphdr *)skb->data;
	struct slave *curr_active_slave, *curr_arp_slave;
	unsigned char *arp_ptr;
	__be32 sip, tip;
	int is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
	unsigned int alen;

	if (!slave_do_arp_validate(bond, slave)) {
		/* Not validating: any traffic (or any ARP, if
		 * validate_only) counts as activity.
		 */
		if ((slave_do_arp_validate_only(bond) && is_arp) ||
		    !slave_do_arp_validate_only(bond))
			slave->last_rx = jiffies;
		return RX_HANDLER_ANOTHER;
	} else if (!is_arp) {
		return RX_HANDLER_ANOTHER;
	}

	alen = arp_hdr_len(bond->dev);

	slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
		   __func__, skb->dev->name);

	/* Header may be fragmented across the skb; linearize a copy.
	 * The copy is freed at out_unlock.
	 */
	if (alen > skb_headlen(skb)) {
		arp = kmalloc(alen, GFP_ATOMIC);
		if (!arp)
			goto out_unlock;
		if (skb_copy_bits(skb, 0, arp, alen) < 0)
			goto out_unlock;
	}

	/* Sanity-check: Ethernet/IPv4 ARP addressed to us. */
	if (arp->ar_hln != bond->dev->addr_len ||
	    skb->pkt_type == PACKET_OTHERHOST ||
	    skb->pkt_type == PACKET_LOOPBACK ||
	    arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_pln != 4)
		goto out_unlock;

	arp_ptr = (unsigned char *)(arp + 1);
	arp_ptr += bond->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4 + bond->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
		  __func__, slave->dev->name, bond_slave_state(slave),
		  bond->params.arp_validate, slave_do_arp_validate(bond, slave),
		  &sip, &tip);

	curr_active_slave = rcu_dereference(bond->curr_active_slave);
	curr_arp_slave = rcu_dereference(bond->current_arp_slave);

	/* We 'trust' the received ARP enough to validate it if:
	 *
	 * (a) the slave receiving the ARP is active (which includes the
	 * current ARP slave, if any), or
	 *
	 * (b) the receiving slave isn't active, but there is a currently
	 * active slave and it received valid arp reply(s) after it became
	 * the currently active slave, or
	 *
	 * (c) there is an ARP slave that sent an ARP during the prior ARP
	 * interval, and we receive an ARP reply on any slave.  We accept
	 * these because switch FDB update delays may deliver the ARP
	 * reply to a slave other than the sender of the ARP request.
	 *
	 * Note: for (b), backup slaves are receiving the broadcast ARP
	 * request, not a reply.  This request passes from the sending
	 * slave through the L2 switch(es) to the receiving slave.  Since
	 * this is checking the request, sip/tip are swapped for
	 * validation.
	 *
	 * This is done to avoid endless looping when we can't reach the
	 * arp_ip_target and fool ourselves with our own arp requests.
	 */
	if (bond_is_active_slave(slave))
		bond_validate_arp(bond, slave, sip, tip);
	else if (curr_active_slave &&
		 time_after(slave_last_rx(bond, curr_active_slave),
			    curr_active_slave->last_link_up))
		bond_validate_arp(bond, slave, tip, sip);
	else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
		 bond_time_in_interval(bond,
				       dev_trans_start(curr_arp_slave->dev), 1))
		bond_validate_arp(bond, slave, sip, tip);

out_unlock:
	/* Free the linearized copy, if one was made above. */
	if (arp != (struct arphdr *)skb->data)
		kfree(arp);
	return RX_HANDLER_ANOTHER;
}

2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001
/* function to verify if we're in the arp_interval timeslice, returns true if
 * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
 * arp_interval/2) . the arp_interval/2 is needed for really fast networks.
 */
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
				  int mod)
{
	int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
	unsigned long window_start = last_act - delta_in_ticks;
	unsigned long window_end = last_act + mod * delta_in_ticks +
				   delta_in_ticks / 2;

	return time_in_range(jiffies, window_start, window_end);
}

3002
/* This function is called regularly to monitor each slave's link
 * ensuring that traffic is being sent and received when arp monitoring
 * is used in load-balancing mode. if the adapter has been dormant, then an
 * arp is transmitted to generate traffic. see activebackup_arp_monitor for
 * arp monitoring in active backup mode.
 *
 * Inspection runs under RCU; any resulting state changes are committed
 * under RTNL (acquired with trylock to avoid deadlock with bond_close).
 */
static void bond_loadbalance_arp_mon(struct bonding *bond)
{
	struct slave *slave, *oldcurrent;
	struct list_head *iter;
	int do_failover = 0, slave_state_changed = 0;

	if (!bond_has_slaves(bond))
		goto re_arm;

	rcu_read_lock();

	oldcurrent = rcu_dereference(bond->curr_active_slave);
	/* see if any of the previous devices are up now (i.e. they have
	 * xmt and rcv traffic). the curr_active_slave does not come into
	 * the picture unless it is null. also, slave->last_link_up is not
	 * needed here because we send an arp on each slave and give a slave
	 * as long as it needs to get the tx/rx within the delta.
	 * TODO: what about up/down delay in arp mode? it wasn't here before
	 *       so it can wait
	 */
	bond_for_each_slave_rcu(bond, slave, iter) {
		unsigned long trans_start = dev_trans_start(slave->dev);

		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);

		if (slave->link != BOND_LINK_UP) {
			/* Down slave: bring it up if tx AND rx were seen
			 * within one interval.
			 */
			if (bond_time_in_interval(bond, trans_start, 1) &&
			    bond_time_in_interval(bond, slave->last_rx, 1)) {

				bond_propose_link_state(slave, BOND_LINK_UP);
				slave_state_changed = 1;

				/* primary_slave has no meaning in round-robin
				 * mode. the window of a slave being up and
				 * curr_active_slave being null after enslaving
				 * is closed.
				 */
				if (!oldcurrent) {
					slave_info(bond->dev, slave->dev, "link status definitely up\n");
					do_failover = 1;
				} else {
					slave_info(bond->dev, slave->dev, "interface is now up\n");
				}
			}
		} else {
			/* slave->link == BOND_LINK_UP */

			/* not all switches will respond to an arp request
			 * when the source ip is 0, so don't take the link down
			 * if we don't know our ip yet
			 */
			if (!bond_time_in_interval(bond, trans_start, 2) ||
			    !bond_time_in_interval(bond, slave->last_rx, 2)) {

				bond_propose_link_state(slave, BOND_LINK_DOWN);
				slave_state_changed = 1;

				if (slave->link_failure_count < UINT_MAX)
					slave->link_failure_count++;

				slave_info(bond->dev, slave->dev, "interface is now down\n");

				if (slave == oldcurrent)
					do_failover = 1;
			}
		}

		/* note: if switch is in round-robin mode, all links
		 * must tx arp to ensure all links rx an arp - otherwise
		 * links may oscillate or not come up at all; if switch is
		 * in something like xor mode, there is nothing we can
		 * do - all replies will be rx'ed on same link causing slaves
		 * to be unstable during low/no traffic periods
		 */
		if (bond_slave_is_up(slave))
			bond_arp_send_all(bond, slave);
	}

	rcu_read_unlock();

	if (do_failover || slave_state_changed) {
		if (!rtnl_trylock())
			goto re_arm;

		/* Commit the link states proposed above. */
		bond_for_each_slave(bond, slave, iter) {
			if (slave->link_new_state != BOND_LINK_NOCHANGE)
				slave->link = slave->link_new_state;
		}

		if (slave_state_changed) {
			bond_slave_state_change(bond);
			if (BOND_MODE(bond) == BOND_MODE_XOR)
				bond_update_slave_arr(bond, NULL);
		}
		if (do_failover) {
			block_netpoll_tx();
			bond_select_active_slave(bond);
			unblock_netpoll_tx();
		}
		rtnl_unlock();
	}

re_arm:
	if (bond->params.arp_interval)
		queue_delayed_work(bond->wq, &bond->arp_work,
				   msecs_to_jiffies(bond->params.arp_interval));
}

3116
/* Called to inspect slaves for active-backup mode ARP monitor link state
 * changes.  Sets proposed link state in slaves to specify what action
 * should take place for the slave.  Returns 0 if no changes are found, >0
 * if changes to link states must be committed.
 *
 * Called with rcu_read_lock held.
 */
static int bond_ab_arp_inspect(struct bonding *bond)
{
	unsigned long trans_start, last_rx;
	struct list_head *iter;
	struct slave *slave;
	int commit = 0;

	bond_for_each_slave_rcu(bond, slave, iter) {
		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
		last_rx = slave_last_rx(bond, slave);

		if (slave->link != BOND_LINK_UP) {
			/* Down/back slave: recent rx promotes it to up;
			 * a BACK slave with no rx is proposed FAIL.
			 */
			if (bond_time_in_interval(bond, last_rx, 1)) {
				bond_propose_link_state(slave, BOND_LINK_UP);
				commit++;
			} else if (slave->link == BOND_LINK_BACK) {
				bond_propose_link_state(slave, BOND_LINK_FAIL);
				commit++;
			}
			continue;
		}

		/* Give slaves 2*delta after being enslaved or made
		 * active.  This avoids bouncing, as the last receive
		 * times need a full ARP monitor cycle to be updated.
		 */
		if (bond_time_in_interval(bond, slave->last_link_up, 2))
			continue;

		/* Backup slave is down if:
		 * - No current_arp_slave AND
		 * - more than 3*delta since last receive AND
		 * - the bond has an IP address
		 *
		 * Note: a non-null current_arp_slave indicates
		 * the curr_active_slave went down and we are
		 * searching for a new one; under this condition
		 * we only take the curr_active_slave down - this
		 * gives each slave a chance to tx/rx traffic
		 * before being taken out
		 */
		if (!bond_is_active_slave(slave) &&
		    !rcu_access_pointer(bond->current_arp_slave) &&
		    !bond_time_in_interval(bond, last_rx, 3)) {
			bond_propose_link_state(slave, BOND_LINK_DOWN);
			commit++;
		}

		/* Active slave is down if:
		 * - more than 2*delta since transmitting OR
		 * - (more than 2*delta since receive AND
		 *    the bond has an IP address)
		 */
		trans_start = dev_trans_start(slave->dev);
		if (bond_is_active_slave(slave) &&
		    (!bond_time_in_interval(bond, trans_start, 2) ||
		     !bond_time_in_interval(bond, last_rx, 2))) {
			bond_propose_link_state(slave, BOND_LINK_DOWN);
			commit++;
		}
	}

	return commit;
}
L
Linus Torvalds 已提交
3187

3188
/* Called to commit link state changes noted by inspection step of
 * active-backup mode ARP monitor.
 *
 * Called with RTNL hold.
 */
static void bond_ab_arp_commit(struct bonding *bond)
{
	unsigned long trans_start;
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter) {
		switch (slave->link_new_state) {
		case BOND_LINK_NOCHANGE:
			continue;

		case BOND_LINK_UP:
			trans_start = dev_trans_start(slave->dev);
			/* Promote unless this slave is already the active
			 * one; when no active slave exists, also require a
			 * recent transmit to avoid flapping.
			 */
			if (rtnl_dereference(bond->curr_active_slave) != slave ||
			    (!rtnl_dereference(bond->curr_active_slave) &&
			     bond_time_in_interval(bond, trans_start, 1))) {
				struct slave *current_arp_slave;

				current_arp_slave = rtnl_dereference(bond->current_arp_slave);
				bond_set_slave_link_state(slave, BOND_LINK_UP,
							  BOND_SLAVE_NOTIFY_NOW);
				/* The probe slave's job is done; demote it. */
				if (current_arp_slave) {
					bond_set_slave_inactive_flags(
						current_arp_slave,
						BOND_SLAVE_NOTIFY_NOW);
					RCU_INIT_POINTER(bond->current_arp_slave, NULL);
				}

				slave_info(bond->dev, slave->dev, "link status definitely up\n");

				if (!rtnl_dereference(bond->curr_active_slave) ||
				    slave == rtnl_dereference(bond->primary_slave))
					goto do_failover;

			}

			continue;

		case BOND_LINK_DOWN:
			if (slave->link_failure_count < UINT_MAX)
				slave->link_failure_count++;

			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_NOW);
			bond_set_slave_inactive_flags(slave,
						      BOND_SLAVE_NOTIFY_NOW);

			slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");

			if (slave == rtnl_dereference(bond->curr_active_slave)) {
				RCU_INIT_POINTER(bond->current_arp_slave, NULL);
				goto do_failover;
			}

			continue;

		case BOND_LINK_FAIL:
			bond_set_slave_link_state(slave, BOND_LINK_FAIL,
						  BOND_SLAVE_NOTIFY_NOW);
			bond_set_slave_inactive_flags(slave,
						      BOND_SLAVE_NOTIFY_NOW);

			/* A slave has just been enslaved and has become
			 * the current active slave.
			 */
			if (rtnl_dereference(bond->curr_active_slave))
				RCU_INIT_POINTER(bond->current_arp_slave, NULL);
			continue;

		default:
			slave_err(bond->dev, slave->dev,
				  "impossible: link_new_state %d on slave\n",
				  slave->link_new_state);
			continue;
		}

do_failover:
		block_netpoll_tx();
		bond_select_active_slave(bond);
		unblock_netpoll_tx();
	}

	bond_set_carrier(bond);
}
L
Linus Torvalds 已提交
3277

3278
/* Send ARP probes for active-backup mode ARP monitor.
 *
 * Called with rcu_read_lock held.
 *
 * Returns BOND_SLAVE_NOTIFY_NOW when any slave has pending state/link
 * notifications the caller must flush under RTNL, otherwise
 * BOND_SLAVE_NOTIFY_LATER.
 */
static bool bond_ab_arp_probe(struct bonding *bond)
{
	struct slave *slave, *before = NULL, *new_slave = NULL,
		     *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
		     *curr_active_slave = rcu_dereference(bond->curr_active_slave);
	struct list_head *iter;
	bool found = false;
	bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;

	/* Both set at once indicates an inconsistent state; log it. */
	if (curr_arp_slave && curr_active_slave)
		netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n",
			    curr_arp_slave->dev->name,
			    curr_active_slave->dev->name);

	if (curr_active_slave) {
		bond_arp_send_all(bond, curr_active_slave);
		return should_notify_rtnl;
	}

	/* if we don't have a curr_active_slave, search for the next available
	 * backup slave from the current_arp_slave and make it the candidate
	 * for becoming the curr_active_slave
	 */

	if (!curr_arp_slave) {
		curr_arp_slave = bond_first_slave_rcu(bond);
		if (!curr_arp_slave)
			return should_notify_rtnl;
	}

	/* One pass: remember the first up slave before curr_arp_slave
	 * (@before) and the first up slave after it (@new_slave).
	 */
	bond_for_each_slave_rcu(bond, slave, iter) {
		if (!found && !before && bond_slave_is_up(slave))
			before = slave;

		if (found && !new_slave && bond_slave_is_up(slave))
			new_slave = slave;
		/* if the link state is up at this point, we
		 * mark it down - this can happen if we have
		 * simultaneous link failures and
		 * reselect_active_interface doesn't make this
		 * one the current slave so it is still marked
		 * up when it is actually down
		 */
		if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_LATER);
			if (slave->link_failure_count < UINT_MAX)
				slave->link_failure_count++;

			bond_set_slave_inactive_flags(slave,
						      BOND_SLAVE_NOTIFY_LATER);

			slave_info(bond->dev, slave->dev, "backup interface is now down\n");
		}
		if (slave == curr_arp_slave)
			found = true;
	}

	/* Wrap around when no up slave followed curr_arp_slave. */
	if (!new_slave && before)
		new_slave = before;

	if (!new_slave)
		goto check_state;

	/* Make the candidate the new probing slave. */
	bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
				  BOND_SLAVE_NOTIFY_LATER);
	bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
	bond_arp_send_all(bond, new_slave);
	new_slave->last_link_up = jiffies;
	rcu_assign_pointer(bond->current_arp_slave, new_slave);

check_state:
	bond_for_each_slave_rcu(bond, slave, iter) {
		if (slave->should_notify || slave->should_notify_link) {
			should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
			break;
		}
	}
	return should_notify_rtnl;
}
L
Linus Torvalds 已提交
3362

3363
/* ARP monitor for active-backup mode: inspect under RCU, commit under
 * RTNL (trylock, racing with bond_close), probe, then re-arm the work
 * and flush any pending peer/slave notifications.
 */
static void bond_activebackup_arp_mon(struct bonding *bond)
{
	bool should_notify_peers = false;
	bool should_notify_rtnl = false;
	int delta_in_ticks;

	delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);

	if (!bond_has_slaves(bond))
		goto re_arm;

	rcu_read_lock();

	should_notify_peers = bond_should_notify_peers(bond);

	if (bond_ab_arp_inspect(bond)) {
		rcu_read_unlock();

		/* Race avoidance with bond_close flush of workqueue */
		if (!rtnl_trylock()) {
			delta_in_ticks = 1;
			should_notify_peers = false;
			goto re_arm;
		}

		bond_ab_arp_commit(bond);

		rtnl_unlock();
		rcu_read_lock();
	}

	should_notify_rtnl = bond_ab_arp_probe(bond);
	rcu_read_unlock();

re_arm:
	if (bond->params.arp_interval)
		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);

	/* Notifications require RTNL; give up silently if contended. */
	if (should_notify_peers || should_notify_rtnl) {
		if (!rtnl_trylock())
			return;

		if (should_notify_peers)
			call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
						 bond->dev);
		if (should_notify_rtnl) {
			bond_slave_state_notify(bond);
			bond_slave_link_notify(bond);
		}

		rtnl_unlock();
	}
}

3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427
/* Delayed-work entry point for ARP monitoring: dispatch to the variant
 * matching the current bonding mode.
 */
static void bond_arp_monitor(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    arp_work.work);

	if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
		bond_loadbalance_arp_mon(bond);
	else
		bond_activebackup_arp_mon(bond);
}

L
Linus Torvalds 已提交
3428 3429
/*-------------------------- netdev event handling --------------------------*/

3430
/* Change device name */
static int bond_event_changename(struct bonding *bond)
{
	/* The procfs entry is keyed by name: drop and recreate it. */
	bond_remove_proc_entry(bond);
	bond_create_proc_entry(bond);

	/* Re-register the debugfs entry under the new name as well. */
	bond_debug_reregister(bond);

	return NOTIFY_DONE;
}

S
Stephen Hemminger 已提交
3441 3442
/* Netdevice notifier handler for events on a bond master device.
 * Maintains the procfs entry across register/unregister/rename and
 * flushes offloaded xfrm state on unregister.  Always NOTIFY_DONE
 * except for the rename path's own return value.
 */
static int bond_master_netdev_event(unsigned long event,
				    struct net_device *bond_dev)
{
	struct bonding *event_bond = netdev_priv(bond_dev);

	netdev_dbg(bond_dev, "%s called\n", __func__);

	switch (event) {
	case NETDEV_CHANGENAME:
		return bond_event_changename(event_bond);
	case NETDEV_UNREGISTER:
		bond_remove_proc_entry(event_bond);
#ifdef CONFIG_XFRM_OFFLOAD
		xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true);
#endif /* CONFIG_XFRM_OFFLOAD */
		break;
	case NETDEV_REGISTER:
		bond_create_proc_entry(event_bond);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

S
Stephen Hemminger 已提交
3467 3468
static int bond_slave_netdev_event(unsigned long event,
				   struct net_device *slave_dev)
L
Linus Torvalds 已提交
3469
{
3470
	struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary;
3471 3472
	struct bonding *bond;
	struct net_device *bond_dev;
L
Linus Torvalds 已提交
3473

3474 3475 3476 3477
	/* A netdev event can be generated while enslaving a device
	 * before netdev_rx_handler_register is called in which case
	 * slave will be NULL
	 */
3478 3479
	if (!slave) {
		netdev_dbg(slave_dev, "%s called on NULL slave\n", __func__);
3480
		return NOTIFY_DONE;
3481 3482
	}

3483 3484
	bond_dev = slave->bond->dev;
	bond = slave->bond;
3485
	primary = rtnl_dereference(bond->primary_slave);
3486

3487 3488
	slave_dbg(bond_dev, slave_dev, "%s called\n", __func__);

L
Linus Torvalds 已提交
3489 3490
	switch (event) {
	case NETDEV_UNREGISTER:
3491
		if (bond_dev->type != ARPHRD_ETHER)
3492 3493
			bond_release_and_destroy(bond_dev, slave_dev);
		else
3494
			__bond_release_one(bond_dev, slave_dev, false, true);
L
Linus Torvalds 已提交
3495
		break;
3496
	case NETDEV_UP:
L
Linus Torvalds 已提交
3497
	case NETDEV_CHANGE:
3498 3499
		/* For 802.3ad mode only:
		 * Getting invalid Speed/Duplex values here will put slave
3500 3501 3502 3503
		 * in weird state. Mark it as link-fail if the link was
		 * previously up or link-down if it hasn't yet come up, and
		 * let link-monitoring (miimon) set it right when correct
		 * speeds/duplex are available.
3504 3505
		 */
		if (bond_update_speed_duplex(slave) &&
3506 3507 3508 3509 3510 3511
		    BOND_MODE(bond) == BOND_MODE_8023AD) {
			if (slave->last_link_up)
				slave->link = BOND_LINK_FAIL;
			else
				slave->link = BOND_LINK_DOWN;
		}
3512

3513 3514
		if (BOND_MODE(bond) == BOND_MODE_8023AD)
			bond_3ad_adapter_speed_duplex_changed(slave);
3515
		fallthrough;
M
Mahesh Bandewar 已提交
3516
	case NETDEV_DOWN:
3517 3518 3519 3520 3521 3522 3523 3524
		/* Refresh slave-array if applicable!
		 * If the setup does not use miimon or arpmon (mode-specific!),
		 * then these events will not cause the slave-array to be
		 * refreshed. This will cause xmit to use a slave that is not
		 * usable. Avoid such situation by refeshing the array at these
		 * events. If these (miimon/arpmon) parameters are configured
		 * then array gets refreshed twice and that should be fine!
		 */
3525
		if (bond_mode_can_use_xmit_hash(bond))
3526
			bond_update_slave_arr(bond, NULL);
L
Linus Torvalds 已提交
3527 3528
		break;
	case NETDEV_CHANGEMTU:
3529
		/* TODO: Should slaves be allowed to
L
Linus Torvalds 已提交
3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541
		 * independently alter their MTU?  For
		 * an active-backup bond, slaves need
		 * not be the same type of device, so
		 * MTUs may vary.  For other modes,
		 * slaves arguably should have the
		 * same MTUs. To do this, we'd need to
		 * take over the slave's change_mtu
		 * function for the duration of their
		 * servitude.
		 */
		break;
	case NETDEV_CHANGENAME:
3542
		/* we don't care if we don't have primary set */
3543
		if (!bond_uses_primary(bond) ||
3544 3545 3546
		    !bond->params.primary[0])
			break;

3547
		if (slave == primary) {
3548
			/* slave's name changed - he's no longer primary */
3549
			RCU_INIT_POINTER(bond->primary_slave, NULL);
3550 3551
		} else if (!strcmp(slave_dev->name, bond->params.primary)) {
			/* we have a new primary slave */
3552
			rcu_assign_pointer(bond->primary_slave, slave);
3553 3554 3555 3556
		} else { /* we didn't change primary - exit */
			break;
		}

3557
		netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n",
3558
			    primary ? slave_dev->name : "none");
3559 3560

		block_netpoll_tx();
3561
		bond_select_active_slave(bond);
3562
		unblock_netpoll_tx();
L
Linus Torvalds 已提交
3563
		break;
3564 3565 3566
	case NETDEV_FEAT_CHANGE:
		bond_compute_features(bond);
		break;
3567 3568 3569 3570
	case NETDEV_RESEND_IGMP:
		/* Propagate to master device */
		call_netdevice_notifiers(event, slave->bond->dev);
		break;
L
Linus Torvalds 已提交
3571 3572 3573 3574 3575 3576 3577
	default:
		break;
	}

	return NOTIFY_DONE;
}

/* bond_netdev_event: handle netdev notifier chain events.
 *
 * This function receives events for the netdev chain.  The caller (an
 * ioctl handler calling blocking_notifier_call_chain) holds the necessary
 * locks for us to safely manipulate the slave devices (RTNL lock,
 * dev_probe_lock).
 */
S
Stephen Hemminger 已提交
3585 3586
static int bond_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
L
Linus Torvalds 已提交
3587
{
3588
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
L
Linus Torvalds 已提交
3589

3590 3591
	netdev_dbg(event_dev, "%s received %s\n",
		   __func__, netdev_cmd_to_name(event));
L
Linus Torvalds 已提交
3592

3593 3594 3595
	if (!(event_dev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

L
Linus Torvalds 已提交
3596
	if (event_dev->flags & IFF_MASTER) {
3597 3598 3599 3600 3601
		int ret;

		ret = bond_master_netdev_event(event, event_dev);
		if (ret != NOTIFY_DONE)
			return ret;
L
Linus Torvalds 已提交
3602 3603
	}

3604
	if (event_dev->flags & IFF_SLAVE)
L
Linus Torvalds 已提交
3605 3606 3607 3608 3609 3610 3611 3612 3613
		return bond_slave_netdev_event(event, event_dev);

	return NOTIFY_DONE;
}

/* Notifier block hooking bond_netdev_event() into the netdev notifier chain. */
static struct notifier_block bond_netdev_notifier = {
	.notifier_call = bond_netdev_event,
};

/*---------------------------- Hashing Policies -----------------------------*/

/* L2 hash helper */
static inline u32 bond_eth_hash(struct sk_buff *skb)
3618
{
3619
	struct ethhdr *ep, hdr_tmp;
3620

3621 3622 3623
	ep = skb_header_pointer(skb, 0, sizeof(hdr_tmp), &hdr_tmp);
	if (ep)
		return ep->h_dest[5] ^ ep->h_source[5] ^ ep->h_proto;
3624 3625 3626
	return 0;
}

M
Matteo Croce 已提交
3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657
/* Copy the L3 addresses of @skb into @fk and advance *noff past the IP
 * header; *proto is set to the transport protocol when it is known.
 * Returns false when the packet is neither IPv4 nor IPv6 or is too short.
 */
static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk,
			 int *noff, int *proto, bool l34)
{
	const struct ipv6hdr *ip6hdr;
	const struct iphdr *ip4hdr;

	if (skb->protocol == htons(ETH_P_IP)) {
		if (unlikely(!pskb_may_pull(skb, *noff + sizeof(*ip4hdr))))
			return false;
		ip4hdr = (const struct iphdr *)(skb->data + *noff);
		iph_to_flow_copy_v4addrs(fk, ip4hdr);
		*noff += ip4hdr->ihl << 2;
		/* non-first fragments carry no transport header */
		if (!ip_is_fragment(ip4hdr))
			*proto = ip4hdr->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (unlikely(!pskb_may_pull(skb, *noff + sizeof(*ip6hdr))))
			return false;
		ip6hdr = (const struct ipv6hdr *)(skb->data + *noff);
		iph_to_flow_copy_v6addrs(fk, ip6hdr);
		*noff += sizeof(*ip6hdr);
		*proto = ip6hdr->nexthdr;
	} else {
		return false;
	}

	/* layer3+4 policies additionally fold in the L4 ports */
	if (l34 && *proto >= 0)
		fk->ports.ports = skb_flow_get_ports(skb, *noff, *proto);

	return true;
}

3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678
/* Hash policy BOND_XMIT_POLICY_VLAN_SRCMAC: fold the source MAC's OUI
 * (first three octets) and NIC-specific part (last three octets), XORed
 * with the VLAN tag when one is present.
 */
static u32 bond_vlan_srcmac_hash(struct sk_buff *skb)
{
	struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
	u32 oui = 0, nic = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		if (i < 3)
			oui = (oui << 8) | mac_hdr->h_source[i];
		else
			nic = (nic << 8) | mac_hdr->h_source[i];
	}

	if (skb_vlan_tag_present(skb))
		return skb_vlan_tag_get(skb) ^ oui ^ nic;

	return oui ^ nic;
}

3679 3680 3681
/* Extract the appropriate headers based on bond's xmit policy */
static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
			      struct flow_keys *fk)
3682
{
M
Matteo Croce 已提交
3683
	bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34;
3684
	int noff, proto = -1;
3685

3686 3687 3688
	switch (bond->params.xmit_policy) {
	case BOND_XMIT_POLICY_ENCAP23:
	case BOND_XMIT_POLICY_ENCAP34:
3689 3690 3691
		memset(fk, 0, sizeof(*fk));
		return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
					  fk, NULL, 0, 0, 0, 0);
3692 3693
	default:
		break;
3694
	}
3695

3696
	fk->ports.ports = 0;
3697
	memset(&fk->icmp, 0, sizeof(fk->icmp));
3698
	noff = skb_network_offset(skb);
M
Matteo Croce 已提交
3699
	if (!bond_flow_ip(skb, fk, &noff, &proto, l34))
3700
		return false;
M
Matteo Croce 已提交
3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722

	/* ICMP error packets contains at least 8 bytes of the header
	 * of the packet which generated the error. Use this information
	 * to correlate ICMP error packets within the same flow which
	 * generated the error.
	 */
	if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) {
		skb_flow_get_icmp_tci(skb, &fk->icmp, skb->data,
				      skb_transport_offset(skb),
				      skb_headlen(skb));
		if (proto == IPPROTO_ICMP) {
			if (!icmp_is_err(fk->icmp.type))
				return true;

			noff += sizeof(struct icmphdr);
		} else if (proto == IPPROTO_ICMPV6) {
			if (!icmpv6_is_err(fk->icmp.type))
				return true;

			noff += sizeof(struct icmp6hdr);
		}
		return bond_flow_ip(skb, fk, &noff, &proto, l34);
3723
	}
3724

3725
	return true;
3726 3727
}

3728 3729 3730 3731 3732 3733 3734 3735 3736 3737
/* Fold the L3 addresses of @flow into @hash and spread the entropy
 * down into the low-order bits.
 */
static u32 bond_ip_hash(u32 hash, struct flow_keys *flow)
{
	u32 folded = hash;

	folded ^= (__force u32)flow_get_u32_dst(flow) ^
		  (__force u32)flow_get_u32_src(flow);
	folded ^= (folded >> 16);
	folded ^= (folded >> 8);

	/* discard lowest hash bit to deal with the common even ports pattern */
	return folded >> 1;
}

3738 3739 3740 3741 3742 3743 3744
/**
 * bond_xmit_hash - generate a hash value based on the xmit policy
 * @bond: bonding device
 * @skb: buffer to use for headers
 *
 * This function will extract the necessary headers from the skb buffer and use
 * them to generate a hash based on the xmit_policy set in the bonding device
3745
 */
3746
u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
3747
{
3748 3749
	struct flow_keys flow;
	u32 hash;
3750

E
Eric Dumazet 已提交
3751 3752 3753 3754
	if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 &&
	    skb->l4_hash)
		return skb->hash;

3755 3756 3757
	if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC)
		return bond_vlan_srcmac_hash(skb);

3758 3759
	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
	    !bond_flow_dissect(bond, skb, &flow))
3760
		return bond_eth_hash(skb);
3761

3762
	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
3763
	    bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) {
3764
		hash = bond_eth_hash(skb);
3765 3766 3767 3768 3769 3770
	} else {
		if (flow.icmp.id)
			memcpy(&hash, &flow.icmp, sizeof(hash));
		else
			memcpy(&hash, &flow.ports.ports, sizeof(hash));
	}
3771

3772
	return bond_ip_hash(hash, &flow);
3773 3774
}

/*-------------------------- Device entry points ----------------------------*/

void bond_work_init_all(struct bonding *bond)
3778 3779 3780 3781 3782
{
	INIT_DELAYED_WORK(&bond->mcast_work,
			  bond_resend_igmp_join_requests_delayed);
	INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
	INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
3783
	INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);
3784
	INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
3785
	INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
3786 3787 3788 3789 3790 3791 3792 3793 3794
}

/* Cancel all of the bond's delayed work, waiting for in-flight runs. */
static void bond_work_cancel_all(struct bonding *bond)
{
	cancel_delayed_work_sync(&bond->mii_work);
	cancel_delayed_work_sync(&bond->arp_work);
	cancel_delayed_work_sync(&bond->alb_work);
	cancel_delayed_work_sync(&bond->ad_work);
	cancel_delayed_work_sync(&bond->mcast_work);
	cancel_delayed_work_sync(&bond->slave_arr_work);
}

L
Linus Torvalds 已提交
3798 3799
static int bond_open(struct net_device *bond_dev)
{
3800
	struct bonding *bond = netdev_priv(bond_dev);
3801
	struct list_head *iter;
3802
	struct slave *slave;
L
Linus Torvalds 已提交
3803

3804
	/* reset slave->backup and slave->inactive */
3805
	if (bond_has_slaves(bond)) {
3806
		bond_for_each_slave(bond, slave, iter) {
3807 3808
			if (bond_uses_primary(bond) &&
			    slave != rcu_access_pointer(bond->curr_active_slave)) {
3809 3810
				bond_set_slave_inactive_flags(slave,
							      BOND_SLAVE_NOTIFY_NOW);
3811
			} else if (BOND_MODE(bond) != BOND_MODE_8023AD) {
3812 3813
				bond_set_slave_active_flags(slave,
							    BOND_SLAVE_NOTIFY_NOW);
3814 3815 3816 3817
			}
		}
	}

3818
	if (bond_is_lb(bond)) {
L
Linus Torvalds 已提交
3819 3820 3821
		/* bond_alb_initialize must be called before the timer
		 * is started.
		 */
3822
		if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
3823
			return -ENOMEM;
3824
		if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB)
3825
			queue_delayed_work(bond->wq, &bond->alb_work, 0);
L
Linus Torvalds 已提交
3826 3827
	}

3828
	if (bond->params.miimon)  /* link check interval, in milliseconds. */
3829
		queue_delayed_work(bond->wq, &bond->mii_work, 0);
L
Linus Torvalds 已提交
3830 3831

	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
3832
		queue_delayed_work(bond->wq, &bond->arp_work, 0);
3833
		bond->recv_probe = bond_arp_rcv;
L
Linus Torvalds 已提交
3834 3835
	}

3836
	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
3837
		queue_delayed_work(bond->wq, &bond->ad_work, 0);
L
Linus Torvalds 已提交
3838
		/* register to receive LACPDUs */
3839
		bond->recv_probe = bond_3ad_lacpdu_recv;
3840
		bond_3ad_initiate_agg_selection(bond, 1);
L
Linus Torvalds 已提交
3841 3842
	}

3843
	if (bond_mode_can_use_xmit_hash(bond))
3844 3845
		bond_update_slave_arr(bond, NULL);

L
Linus Torvalds 已提交
3846 3847 3848 3849 3850
	return 0;
}

static int bond_close(struct net_device *bond_dev)
{
3851
	struct bonding *bond = netdev_priv(bond_dev);
L
Linus Torvalds 已提交
3852

3853
	bond_work_cancel_all(bond);
3854
	bond->send_peer_notif = 0;
3855
	if (bond_is_lb(bond))
L
Linus Torvalds 已提交
3856
		bond_alb_deinitialize(bond);
3857
	bond->recv_probe = NULL;
L
Linus Torvalds 已提交
3858 3859 3860 3861

	return 0;
}

E
Eric Dumazet 已提交
3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876
/* fold stats, assuming all rtnl_link_stats64 fields are u64, but
 * that some drivers can provide 32bit values only.
 */
static void bond_fold_stats(struct rtnl_link_stats64 *_res,
			    const struct rtnl_link_stats64 *_new,
			    const struct rtnl_link_stats64 *_old)
{
	const u64 *new = (const u64 *)_new;
	const u64 *old = (const u64 *)_old;
	u64 *res = (u64 *)_res;
	int i;

	for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
		u64 nv = new[i];
		u64 ov = old[i];
3877
		s64 delta = nv - ov;
E
Eric Dumazet 已提交
3878 3879 3880

		/* detects if this particular field is 32bit only */
		if (((nv | ov) >> 32) == 0)
3881 3882 3883 3884 3885 3886 3887
			delta = (s64)(s32)((u32)nv - (u32)ov);

		/* filter anomalies, some drivers reset their stats
		 * at down/up events.
		 */
		if (delta > 0)
			res[i] += delta;
E
Eric Dumazet 已提交
3888 3889 3890
	}
}

3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931
#ifdef CONFIG_LOCKDEP
/* Walk the lower-device adjacency lists depth-first (iteratively, with an
 * explicit stack bounded by MAX_NEST_DEV) and return the maximum nesting
 * depth reached below @dev.  Used to pick a lockdep nesting level; caller
 * presumably holds rcu_read_lock() — it uses the _rcu iterator.
 */
static int bond_get_lowest_level_rcu(struct net_device *dev)
{
	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
	int cur = 0, max = 0;

	now = dev;
	iter = &dev->adj_list.lower;

	while (1) {
		next = NULL;
		while (1) {
			/* descend one level to the first remaining lower dev */
			ldev = netdev_next_lower_dev_rcu(now, &iter);
			if (!ldev)
				break;

			next = ldev;
			niter = &ldev->adj_list.lower;
			/* remember where to resume when we pop back up */
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;
			if (max <= cur)
				max = cur;
			break;
		}

		if (!next) {
			/* nothing deeper: pop, or finish when stack is empty */
			if (!cur)
				return max;
			next = dev_stack[--cur];
			niter = iter_stack[cur];
		}

		now = next;
		iter = niter;
	}

	/* unreachable: the loop only exits via the return above */
	return max;
}
#endif

3932 3933
static void bond_get_stats(struct net_device *bond_dev,
			   struct rtnl_link_stats64 *stats)
L
Linus Torvalds 已提交
3934
{
3935
	struct bonding *bond = netdev_priv(bond_dev);
3936
	struct rtnl_link_stats64 temp;
3937
	struct list_head *iter;
L
Linus Torvalds 已提交
3938
	struct slave *slave;
3939
	int nest_level = 0;
L
Linus Torvalds 已提交
3940 3941


E
Eric Dumazet 已提交
3942
	rcu_read_lock();
3943 3944 3945 3946 3947 3948 3949
#ifdef CONFIG_LOCKDEP
	nest_level = bond_get_lowest_level_rcu(bond_dev);
#endif

	spin_lock_nested(&bond->stats_lock, nest_level);
	memcpy(stats, &bond->bond_stats, sizeof(*stats));

E
Eric Dumazet 已提交
3950 3951
	bond_for_each_slave_rcu(bond, slave, iter) {
		const struct rtnl_link_stats64 *new =
3952
			dev_get_stats(slave->dev, &temp);
E
Eric Dumazet 已提交
3953 3954

		bond_fold_stats(stats, new, &slave->slave_stats);
3955 3956

		/* save off the slave stats for the next run */
E
Eric Dumazet 已提交
3957
		memcpy(&slave->slave_stats, new, sizeof(*new));
3958
	}
E
Eric Dumazet 已提交
3959

3960
	memcpy(&bond->bond_stats, stats, sizeof(*stats));
E
Eric Dumazet 已提交
3961
	spin_unlock(&bond->stats_lock);
3962
	rcu_read_unlock();
L
Linus Torvalds 已提交
3963 3964
}

3965
static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
L
Linus Torvalds 已提交
3966
{
3967
	struct bonding *bond = netdev_priv(bond_dev);
L
Linus Torvalds 已提交
3968
	struct mii_ioctl_data *mii = NULL;
3969
	int res;
L
Linus Torvalds 已提交
3970

3971
	netdev_dbg(bond_dev, "bond_eth_ioctl: cmd=%d\n", cmd);
L
Linus Torvalds 已提交
3972 3973 3974 3975

	switch (cmd) {
	case SIOCGMIIPHY:
		mii = if_mii(ifr);
S
Stephen Hemminger 已提交
3976
		if (!mii)
L
Linus Torvalds 已提交
3977
			return -EINVAL;
S
Stephen Hemminger 已提交
3978

L
Linus Torvalds 已提交
3979
		mii->phy_id = 0;
3980
		fallthrough;
L
Linus Torvalds 已提交
3981
	case SIOCGMIIREG:
3982
		/* We do this again just in case we were called by SIOCGMIIREG
L
Linus Torvalds 已提交
3983 3984 3985
		 * instead of SIOCGMIIPHY.
		 */
		mii = if_mii(ifr);
S
Stephen Hemminger 已提交
3986
		if (!mii)
L
Linus Torvalds 已提交
3987
			return -EINVAL;
S
Stephen Hemminger 已提交
3988

L
Linus Torvalds 已提交
3989 3990
		if (mii->reg_num == 1) {
			mii->val_out = 0;
S
Stephen Hemminger 已提交
3991
			if (netif_carrier_ok(bond->dev))
L
Linus Torvalds 已提交
3992 3993 3994 3995
				mii->val_out = BMSR_LSTATUS;
		}

		return 0;
3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017
	default:
		res = -EOPNOTSUPP;
	}

	return res;
}

static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct net_device *slave_dev = NULL;
	struct ifbond k_binfo;
	struct ifbond __user *u_binfo = NULL;
	struct ifslave k_sinfo;
	struct ifslave __user *u_sinfo = NULL;
	struct bond_opt_value newval;
	struct net *net;
	int res = 0;

	netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);

	switch (cmd) {
L
Linus Torvalds 已提交
4018 4019 4020
	case SIOCBONDINFOQUERY:
		u_binfo = (struct ifbond __user *)ifr->ifr_data;

S
Stephen Hemminger 已提交
4021
		if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
L
Linus Torvalds 已提交
4022 4023
			return -EFAULT;

4024 4025
		bond_info_query(bond_dev, &k_binfo);
		if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
S
Stephen Hemminger 已提交
4026
			return -EFAULT;
L
Linus Torvalds 已提交
4027

4028
		return 0;
L
Linus Torvalds 已提交
4029 4030 4031
	case SIOCBONDSLAVEINFOQUERY:
		u_sinfo = (struct ifslave __user *)ifr->ifr_data;

S
Stephen Hemminger 已提交
4032
		if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave)))
L
Linus Torvalds 已提交
4033 4034 4035
			return -EFAULT;

		res = bond_slave_info_query(bond_dev, &k_sinfo);
S
Stephen Hemminger 已提交
4036 4037 4038
		if (res == 0 &&
		    copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave)))
			return -EFAULT;
L
Linus Torvalds 已提交
4039 4040 4041 4042 4043 4044

		return res;
	default:
		break;
	}

4045 4046 4047
	net = dev_net(bond_dev);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
L
Linus Torvalds 已提交
4048 4049
		return -EPERM;

4050
	slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
L
Linus Torvalds 已提交
4051

4052
	slave_dbg(bond_dev, slave_dev, "slave_dev=%p:\n", slave_dev);
L
Linus Torvalds 已提交
4053

S
Stephen Hemminger 已提交
4054
	if (!slave_dev)
4055
		return -ENODEV;
L
Linus Torvalds 已提交
4056

4057 4058
	switch (cmd) {
	case SIOCBONDENSLAVE:
D
David Ahern 已提交
4059
		res = bond_enslave(bond_dev, slave_dev, NULL);
4060 4061 4062 4063 4064
		break;
	case SIOCBONDRELEASE:
		res = bond_release(bond_dev, slave_dev);
		break;
	case SIOCBONDSETHWADDR:
4065
		res = bond_set_dev_addr(bond_dev, slave_dev);
4066 4067
		break;
	case SIOCBONDCHANGEACTIVE:
4068
		bond_opt_initstr(&newval, slave_dev->name);
4069 4070
		res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE,
					    &newval);
4071 4072 4073
		break;
	default:
		res = -EOPNOTSUPP;
L
Linus Torvalds 已提交
4074 4075 4076 4077 4078
	}

	return res;
}

A
Arnd Bergmann 已提交
4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101
/* ndo_siocdevprivate: translate each legacy BOND_*_OLD private ioctl to
 * its modern SIOCBOND* equivalent and delegate to bond_do_ioctl().  The
 * query commands pass @data through a temporary ifreq.
 */
static int bond_siocdevprivate(struct net_device *bond_dev, struct ifreq *ifr,
			       void __user *data, int cmd)
{
	struct ifreq ifrdata = { .ifr_data = data };

	switch (cmd) {
	case BOND_INFO_QUERY_OLD:
		return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDINFOQUERY);
	case BOND_SLAVE_INFO_QUERY_OLD:
		return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDSLAVEINFOQUERY);
	case BOND_ENSLAVE_OLD:
		return bond_do_ioctl(bond_dev, ifr, SIOCBONDENSLAVE);
	case BOND_RELEASE_OLD:
		return bond_do_ioctl(bond_dev, ifr, SIOCBONDRELEASE);
	case BOND_SETHWADDR_OLD:
		return bond_do_ioctl(bond_dev, ifr, SIOCBONDSETHWADDR);
	case BOND_CHANGE_ACTIVE_OLD:
		return bond_do_ioctl(bond_dev, ifr, SIOCBONDCHANGEACTIVE);
	default:
		return -EOPNOTSUPP;
	}
}

4102
static void bond_change_rx_flags(struct net_device *bond_dev, int change)
L
Linus Torvalds 已提交
4103
{
4104
	struct bonding *bond = netdev_priv(bond_dev);
L
Linus Torvalds 已提交
4105

4106 4107 4108
	if (change & IFF_PROMISC)
		bond_set_promiscuity(bond,
				     bond_dev->flags & IFF_PROMISC ? 1 : -1);
S
Stephen Hemminger 已提交
4109

4110 4111 4112 4113
	if (change & IFF_ALLMULTI)
		bond_set_allmulti(bond,
				  bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
}
L
Linus Torvalds 已提交
4114

4115
static void bond_set_rx_mode(struct net_device *bond_dev)
4116 4117
{
	struct bonding *bond = netdev_priv(bond_dev);
4118
	struct list_head *iter;
4119
	struct slave *slave;
L
Linus Torvalds 已提交
4120

4121
	rcu_read_lock();
4122
	if (bond_uses_primary(bond)) {
4123
		slave = rcu_dereference(bond->curr_active_slave);
4124 4125 4126 4127 4128
		if (slave) {
			dev_uc_sync(slave->dev, bond_dev);
			dev_mc_sync(slave->dev, bond_dev);
		}
	} else {
4129
		bond_for_each_slave_rcu(bond, slave, iter) {
4130 4131 4132
			dev_uc_sync_multiple(slave->dev, bond_dev);
			dev_mc_sync_multiple(slave->dev, bond_dev);
		}
L
Linus Torvalds 已提交
4133
	}
4134
	rcu_read_unlock();
L
Linus Torvalds 已提交
4135 4136
}

4137
static int bond_neigh_init(struct neighbour *n)
4138
{
4139 4140 4141
	struct bonding *bond = netdev_priv(n->dev);
	const struct net_device_ops *slave_ops;
	struct neigh_parms parms;
4142
	struct slave *slave;
E
Eric Dumazet 已提交
4143
	int ret = 0;
4144

E
Eric Dumazet 已提交
4145 4146
	rcu_read_lock();
	slave = bond_first_slave_rcu(bond);
4147
	if (!slave)
E
Eric Dumazet 已提交
4148
		goto out;
4149
	slave_ops = slave->dev->netdev_ops;
4150
	if (!slave_ops->ndo_neigh_setup)
E
Eric Dumazet 已提交
4151
		goto out;
4152

E
Eric Dumazet 已提交
4153 4154 4155 4156 4157 4158 4159 4160 4161
	/* TODO: find another way [1] to implement this.
	 * Passing a zeroed structure is fragile,
	 * but at least we do not pass garbage.
	 *
	 * [1] One way would be that ndo_neigh_setup() never touch
	 *     struct neigh_parms, but propagate the new neigh_setup()
	 *     back to ___neigh_create() / neigh_parms_alloc()
	 */
	memset(&parms, 0, sizeof(parms));
4162 4163
	ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);

E
Eric Dumazet 已提交
4164 4165
	if (ret)
		goto out;
4166

E
Eric Dumazet 已提交
4167 4168 4169 4170 4171
	if (parms.neigh_setup)
		ret = parms.neigh_setup(n);
out:
	rcu_read_unlock();
	return ret;
4172 4173
}

4174
/* The bonding ndo_neigh_setup is called at init time beofre any
4175 4176
 * slave exists. So we must declare proxy setup function which will
 * be used at run time to resolve the actual slave neigh param setup.
4177 4178 4179 4180
 *
 * It's also called by master devices (such as vlans) to setup their
 * underlying devices. In that case - do nothing, we're already set up from
 * our init.
4181 4182 4183 4184
 */
static int bond_neigh_setup(struct net_device *dev,
			    struct neigh_parms *parms)
{
4185 4186 4187
	/* modify only our neigh_parms */
	if (parms->dev == dev)
		parms->neigh_setup = bond_neigh_init;
4188 4189 4190 4191

	return 0;
}

4192
/* Change the MTU of all of a master's slaves to match the master */
L
Linus Torvalds 已提交
4193 4194
static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
{
4195
	struct bonding *bond = netdev_priv(bond_dev);
4196
	struct slave *slave, *rollback_slave;
4197
	struct list_head *iter;
L
Linus Torvalds 已提交
4198 4199
	int res = 0;

4200
	netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu);
L
Linus Torvalds 已提交
4201

4202
	bond_for_each_slave(bond, slave, iter) {
4203
		slave_dbg(bond_dev, slave->dev, "s %p c_m %p\n",
4204
			   slave, slave->dev->netdev_ops->ndo_change_mtu);
4205

L
Linus Torvalds 已提交
4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216
		res = dev_set_mtu(slave->dev, new_mtu);

		if (res) {
			/* If we failed to set the slave's mtu to the new value
			 * we must abort the operation even in ACTIVE_BACKUP
			 * mode, because if we allow the backup slaves to have
			 * different mtu values than the active slave we'll
			 * need to change their mtu when doing a failover. That
			 * means changing their mtu from timer context, which
			 * is probably not a good idea.
			 */
4217 4218
			slave_dbg(bond_dev, slave->dev, "err %d setting mtu to %d\n",
				  res, new_mtu);
L
Linus Torvalds 已提交
4219 4220 4221 4222 4223 4224 4225 4226 4227 4228
			goto unwind;
		}
	}

	bond_dev->mtu = new_mtu;

	return 0;

unwind:
	/* unwind from head to the slave that failed */
4229
	bond_for_each_slave(bond, rollback_slave, iter) {
L
Linus Torvalds 已提交
4230 4231
		int tmp_res;

4232 4233 4234 4235
		if (rollback_slave == slave)
			break;

		tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
4236 4237 4238
		if (tmp_res)
			slave_dbg(bond_dev, rollback_slave->dev, "unwind err %d\n",
				  tmp_res);
L
Linus Torvalds 已提交
4239 4240 4241 4242 4243
	}

	return res;
}

4244
/* Change HW address
L
Linus Torvalds 已提交
4245 4246 4247 4248 4249 4250 4251
 *
 * Note that many devices must be down to change the HW address, and
 * downing the master releases all slaves.  We can make bonds full of
 * bonding devices to test this, however.
 */
static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
{
4252
	struct bonding *bond = netdev_priv(bond_dev);
4253
	struct slave *slave, *rollback_slave;
4254
	struct sockaddr_storage *ss = addr, tmp_ss;
4255
	struct list_head *iter;
L
Linus Torvalds 已提交
4256 4257
	int res = 0;

4258
	if (BOND_MODE(bond) == BOND_MODE_ALB)
4259 4260 4261
		return bond_alb_set_mac_address(bond_dev, addr);


4262
	netdev_dbg(bond_dev, "%s: bond=%p\n", __func__, bond);
L
Linus Torvalds 已提交
4263

4264 4265
	/* If fail_over_mac is enabled, do nothing and return success.
	 * Returning an error causes ifenslave to fail.
4266
	 */
4267
	if (bond->params.fail_over_mac &&
4268
	    BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
4269
		return 0;
4270

4271
	if (!is_valid_ether_addr(ss->__data))
L
Linus Torvalds 已提交
4272 4273
		return -EADDRNOTAVAIL;

4274
	bond_for_each_slave(bond, slave, iter) {
4275 4276
		slave_dbg(bond_dev, slave->dev, "%s: slave=%p\n",
			  __func__, slave);
4277
		res = dev_set_mac_address(slave->dev, addr, NULL);
L
Linus Torvalds 已提交
4278 4279 4280 4281 4282 4283 4284
		if (res) {
			/* TODO: consider downing the slave
			 * and retry ?
			 * User should expect communications
			 * breakage anyway until ARP finish
			 * updating, so...
			 */
4285 4286
			slave_dbg(bond_dev, slave->dev, "%s: err %d\n",
				  __func__, res);
L
Linus Torvalds 已提交
4287 4288 4289 4290 4291
			goto unwind;
		}
	}

	/* success */
4292
	memcpy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);
L
Linus Torvalds 已提交
4293 4294 4295
	return 0;

unwind:
4296 4297
	memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
	tmp_ss.ss_family = bond_dev->type;
L
Linus Torvalds 已提交
4298 4299

	/* unwind from head to the slave that failed */
4300
	bond_for_each_slave(bond, rollback_slave, iter) {
L
Linus Torvalds 已提交
4301 4302
		int tmp_res;

4303 4304 4305
		if (rollback_slave == slave)
			break;

4306
		tmp_res = dev_set_mac_address(rollback_slave->dev,
4307
					      (struct sockaddr *)&tmp_ss, NULL);
L
Linus Torvalds 已提交
4308
		if (tmp_res) {
4309 4310
			slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n",
				   __func__, tmp_res);
L
Linus Torvalds 已提交
4311 4312 4313 4314 4315 4316
		}
	}

	return res;
}

4317
/**
4318
 * bond_get_slave_by_id - get xmit slave with slave_id
4319 4320 4321
 * @bond: bonding device that is transmitting
 * @slave_id: slave id up to slave_cnt-1 through which to transmit
 *
4322
 * This function tries to get slave with slave_id but in case
4323 4324
 * it fails, it tries to find the first available slave for transmission.
 */
4325 4326
static struct slave *bond_get_slave_by_id(struct bonding *bond,
					  int slave_id)
4327
{
4328
	struct list_head *iter;
4329 4330 4331 4332
	struct slave *slave;
	int i = slave_id;

	/* Here we start from the slave with slave_id */
4333
	bond_for_each_slave_rcu(bond, slave, iter) {
4334
		if (--i < 0) {
4335
			if (bond_slave_can_tx(slave))
4336
				return slave;
4337 4338 4339 4340 4341
		}
	}

	/* Here we start from the first slave up to slave_id */
	i = slave_id;
4342
	bond_for_each_slave_rcu(bond, slave, iter) {
4343 4344
		if (--i < 0)
			break;
4345
		if (bond_slave_can_tx(slave))
4346
			return slave;
4347 4348
	}
	/* no slave that can tx has been found */
4349
	return NULL;
4350 4351
}

4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362
/**
 * bond_rr_gen_slave_id - generate slave id based on packets_per_slave
 * @bond: bonding device to use
 *
 * Based on the value of the bonding device's packets_per_slave parameter
 * this function generates a slave id, which is usually used as the next
 * slave to transmit through.
 */
static u32 bond_rr_gen_slave_id(struct bonding *bond)
{
	u32 slave_id;
4363 4364
	struct reciprocal_value reciprocal_packets_per_slave;
	int packets_per_slave = bond->params.packets_per_slave;
4365 4366 4367 4368 4369 4370

	switch (packets_per_slave) {
	case 0:
		slave_id = prandom_u32();
		break;
	case 1:
4371
		slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
4372 4373
		break;
	default:
4374 4375
		reciprocal_packets_per_slave =
			bond->params.reciprocal_packets_per_slave;
4376 4377
		slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
		slave_id = reciprocal_divide(slave_id,
4378
					     reciprocal_packets_per_slave);
4379 4380 4381 4382 4383 4384
		break;
	}

	return slave_id;
}

4385 4386
/* Pick the transmit slave for round-robin mode.  IGMP traffic is
 * special-cased so that all membership reports leave through the same
 * interface; everything else rotates via bond_rr_gen_slave_id().
 * Returns NULL when the bond has no slaves.  Caller holds RCU.
 */
static struct slave *bond_xmit_roundrobin_slave_get(struct bonding *bond,
						    struct sk_buff *skb)
{
	struct slave *slave;
	int slave_cnt;
	u32 slave_id;

	/* Start with the curr_active_slave that joined the bond as the
	 * default for sending IGMP traffic.  For failover purposes one
	 * needs to maintain some consistency for the interface that will
	 * send the join/membership reports.  The curr_active_slave found
	 * will send all of this type of traffic.
	 */
	if (skb->protocol == htons(ETH_P_IP)) {
		int noff = skb_network_offset(skb);
		struct iphdr *iph;

		/* make sure the IPv4 header is in the linear data area */
		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
			goto non_igmp;

		iph = ip_hdr(skb);
		if (iph->protocol == IPPROTO_IGMP) {
			slave = rcu_dereference(bond->curr_active_slave);
			if (slave)
				return slave;
			/* no active slave: fall back to first tx-capable one */
			return bond_get_slave_by_id(bond, 0);
		}
	}

non_igmp:
	slave_cnt = READ_ONCE(bond->slave_cnt);
	if (likely(slave_cnt)) {
		slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
		return bond_get_slave_by_id(bond, slave_id);
	}
	return NULL;
}

static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
					struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave;

	slave = bond_xmit_roundrobin_slave_get(bond, skb);
4430 4431 4432
	if (likely(slave))
		return bond_dev_queue_xmit(bond, skb, slave->dev);

4433
	return bond_tx_drop(bond_dev, skb);
L
Linus Torvalds 已提交
4434 4435
}

4436 4437 4438 4439 4440 4441
/* Active-backup: the transmit slave is always the current active slave
 * (may be NULL when no slave is usable).  Caller holds RCU.
 */
static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond,
						      struct sk_buff *skb)
{
	return rcu_dereference(bond->curr_active_slave);
}

4442
/* In active-backup mode, we know that bond->curr_active_slave is always valid if
L
Linus Torvalds 已提交
4443 4444
 * the bond has a usable interface.
 */
4445 4446
static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
					  struct net_device *bond_dev)
L
Linus Torvalds 已提交
4447
{
4448
	struct bonding *bond = netdev_priv(bond_dev);
4449
	struct slave *slave;
L
Linus Torvalds 已提交
4450

4451
	slave = bond_xmit_activebackup_slave_get(bond, skb);
4452
	if (slave)
4453
		return bond_dev_queue_xmit(bond, skb, slave->dev);
4454

4455
	return bond_tx_drop(bond_dev, skb);
L
Linus Torvalds 已提交
4456 4457
}

4458 4459 4460
/* Use this to update slave_array when (a) it's not appropriate to update
 * slave_array right away (note that update_slave_array() may sleep)
 * and / or (b) RTNL is not held.
 */
void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay)
{
	/* (re)schedule the deferred slave-array rebuild after @delay jiffies */
	queue_delayed_work(bond->wq, &bond->slave_arr_work, delay);
}
L
Linus Torvalds 已提交
4466

4467 4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 4488
/* Slave array work handler. Holds only RTNL */
static void bond_slave_arr_handler(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    slave_arr_work.work);

	if (rtnl_trylock()) {
		int ret = bond_update_slave_arr(bond, NULL);

		rtnl_unlock();
		if (!ret)
			return;
		pr_warn_ratelimited("Failed to update slave array from WT\n");
	}
	/* RTNL was contended or the update failed: retry shortly */
	bond_slave_arr_work_rearm(bond, 1);
}

4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511
/* Remove @skipslave from @slaves in place by overwriting its entry with
 * the last element of the array.  Used on the error path of
 * bond_update_slave_arr() (allocation failed while releasing a slave),
 * so the xmit path can never pick the slave being removed.
 */
static void bond_skip_slave(struct bond_up_slave *slaves,
			    struct slave *skipslave)
{
	int idx;

	if (!slaves)
		return;

	for (idx = 0; idx < slaves->count; idx++) {
		if (slaves->arr[idx] != skipslave)
			continue;
		/* swap in the last entry and shrink the array */
		slaves->arr[idx] = slaves->arr[--slaves->count];
		break;
	}
}

M
Maor Gottlieb 已提交
4512 4513 4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543
/* Publish new usable/all slave arrays and free the previous ones after
 * an RCU grace period.  Caller holds RTNL (hence rtnl_dereference).
 */
static void bond_set_slave_arr(struct bonding *bond,
			       struct bond_up_slave *usable_slaves,
			       struct bond_up_slave *all_slaves)
{
	struct bond_up_slave *usable, *all;

	usable = rtnl_dereference(bond->usable_slaves);
	rcu_assign_pointer(bond->usable_slaves, usable_slaves);
	kfree_rcu(usable, rcu);

	all = rtnl_dereference(bond->all_slaves);
	rcu_assign_pointer(bond->all_slaves, all_slaves);
	kfree_rcu(all, rcu);
}

/* Drop both slave arrays (used when the current arrays are no longer
 * safe to use, e.g. no active 802.3ad aggregator).  Caller holds RTNL;
 * old arrays are freed after an RCU grace period.
 */
static void bond_reset_slave_arr(struct bonding *bond)
{
	struct bond_up_slave *usable, *all;

	usable = rtnl_dereference(bond->usable_slaves);
	if (usable) {
		RCU_INIT_POINTER(bond->usable_slaves, NULL);
		kfree_rcu(usable, rcu);
	}

	all = rtnl_dereference(bond->all_slaves);
	if (all) {
		RCU_INIT_POINTER(bond->all_slaves, NULL);
		kfree_rcu(all, rcu);
	}
}

4544 4545 4546 4547
/* Build the usable slaves array in control path for modes that use xmit-hash
 * to determine the slave interface -
 * (a) BOND_MODE_8023AD
 * (b) BOND_MODE_XOR
 * (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0
 *
 * The caller is expected to hold RTNL only and NO other lock!
 *
 * Returns 0 on success, -ENOMEM on allocation failure.  On failure the
 * previously published arrays are kept, with @skipslave pruned from them.
 */
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
{
	struct bond_up_slave *usable_slaves = NULL, *all_slaves = NULL;
	struct slave *slave;
	struct list_head *iter;
	int agg_id = 0;
	int ret = 0;

	might_sleep();

	/* arrays are sized for the worst case: every current slave usable */
	usable_slaves = kzalloc(struct_size(usable_slaves, arr,
					    bond->slave_cnt), GFP_KERNEL);
	all_slaves = kzalloc(struct_size(all_slaves, arr,
					 bond->slave_cnt), GFP_KERNEL);
	if (!usable_slaves || !all_slaves) {
		ret = -ENOMEM;
		goto out;
	}
	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		struct ad_info ad_info;

		/* mode_lock protects the 802.3ad aggregator state */
		spin_lock_bh(&bond->mode_lock);
		if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
			spin_unlock_bh(&bond->mode_lock);
			pr_debug("bond_3ad_get_active_agg_info failed\n");
			/* No active aggregator means it's not safe to use
			 * the previous array.
			 */
			bond_reset_slave_arr(bond);
			goto out;
		}
		spin_unlock_bh(&bond->mode_lock);
		agg_id = ad_info.aggregator_id;
	}
	bond_for_each_slave(bond, slave, iter) {
		if (skipslave == slave)
			continue;

		all_slaves->arr[all_slaves->count++] = slave;
		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			struct aggregator *agg;

			/* only members of the active aggregator are usable */
			agg = SLAVE_AD_INFO(slave)->port.aggregator;
			if (!agg || agg->aggregator_identifier != agg_id)
				continue;
		}
		if (!bond_slave_can_tx(slave))
			continue;

		slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n",
			  usable_slaves->count);

		usable_slaves->arr[usable_slaves->count++] = slave;
	}

	/* success: publish both arrays; ownership passes to RCU readers */
	bond_set_slave_arr(bond, usable_slaves, all_slaves);
	return ret;
out:
	if (ret != 0 && skipslave) {
		/* couldn't build fresh arrays: at least make sure the
		 * slave being removed disappears from the current ones
		 */
		bond_skip_slave(rtnl_dereference(bond->all_slaves),
				skipslave);
		bond_skip_slave(rtnl_dereference(bond->usable_slaves),
				skipslave);
	}
	kfree_rcu(all_slaves, rcu);
	kfree_rcu(usable_slaves, rcu);

	return ret;
}

4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638
/* Select a slave from a precomputed @slaves array by hashing @skb.
 * Returns NULL when the array is absent or empty.  Caller holds RCU.
 */
static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond,
						 struct sk_buff *skb,
						 struct bond_up_slave *slaves)
{
	u32 hash = bond_xmit_hash(bond, skb);
	unsigned int count = slaves ? READ_ONCE(slaves->count) : 0;

	if (unlikely(!count))
		return NULL;

	return slaves->arr[hash % count];
}

4639 4640 4641 4642
/* Use this Xmit function for 3AD as well as XOR modes. The current
 * usable slave array is formed in the control path. The xmit function
 * just calculates hash and sends the packet out.
 */
4643 4644
static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
				     struct net_device *dev)
4645 4646 4647
{
	struct bonding *bond = netdev_priv(dev);
	struct bond_up_slave *slaves;
4648
	struct slave *slave;
4649

4650
	slaves = rcu_dereference(bond->usable_slaves);
4651 4652
	slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
	if (likely(slave))
4653
		return bond_dev_queue_xmit(bond, skb, slave->dev);
4654

4655
	return bond_tx_drop(dev, skb);
L
Linus Torvalds 已提交
4656 4657
}

4658
/* in broadcast mode, we send everything to all usable interfaces. */
static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
				       struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave = NULL;
	struct list_head *iter;

	/* Clone the skb for every usable slave except the last one; the
	 * original skb is sent through the last slave below, saving one
	 * clone.  A failed clone only skips that one slave.
	 */
	bond_for_each_slave_rcu(bond, slave, iter) {
		if (bond_is_last_slave(bond, slave))
			break;
		if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			if (!skb2) {
				net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
						    bond_dev->name, __func__);
				continue;
			}
			bond_dev_queue_xmit(bond, skb2, slave->dev);
		}
	}
	/* after the loop @slave is the last slave (or NULL if none) */
	if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
		return bond_dev_queue_xmit(bond, skb, slave->dev);

	return bond_tx_drop(bond_dev, skb);
}

/*------------------------- Device initialization ---------------------------*/

4688
/* Lookup the slave that corresponds to a qid.
 * Returns 0 when the skb was transmitted through the slave whose
 * queue_id matches the skb's recorded rx queue; returns 1 when the
 * caller should fall back to the mode's normal transmit policy.
 */
static inline int bond_slave_override(struct bonding *bond,
				      struct sk_buff *skb)
{
	struct slave *slave = NULL;
	struct list_head *iter;

	if (!skb_rx_queue_recorded(skb))
		return 1;

	/* Find out if any slaves have the same mapping as this skb. */
	bond_for_each_slave_rcu(bond, slave, iter) {
		if (slave->queue_id == skb_get_queue_mapping(skb)) {
			if (bond_slave_is_up(slave) &&
			    slave->link == BOND_LINK_UP) {
				bond_dev_queue_xmit(bond, skb, slave->dev);
				return 0;
			}
			/* If the slave isn't UP, use default transmit policy. */
			break;
		}
	}

	return 1;
}

4714

4715
static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
4716
			     struct net_device *sb_dev)
4717
{
4718
	/* This helper function exists to help dev_pick_tx get the correct
P
Phil Oester 已提交
4719
	 * destination queue.  Using a helper function skips a call to
4720 4721 4722
	 * skb_tx_hash and will put the skbs in the queue we expect on their
	 * way down to the bonding driver.
	 */
P
Phil Oester 已提交
4723 4724
	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

4725
	/* Save the original txq to restore before passing to the driver */
4726
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb);
4727

P
Phil Oester 已提交
4728
	if (unlikely(txq >= dev->real_num_tx_queues)) {
4729
		do {
P
Phil Oester 已提交
4730
			txq -= dev->real_num_tx_queues;
4731
		} while (txq >= dev->real_num_tx_queues);
P
Phil Oester 已提交
4732 4733
	}
	return txq;
4734 4735
}

4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777
/**
 * bond_xmit_get_slave - ndo_get_xmit_slave implementation
 * @master_dev: bonding net device
 * @skb: packet whose transmit slave is being queried
 * @all_slaves: for 8023AD/XOR, consult the all-slaves array instead of
 *	only the usable (tx-capable) one
 *
 * Returns the lower device this bond would transmit @skb through, or
 * NULL (e.g. broadcast mode, or no slave available).  Caller holds RCU.
 */
static struct net_device *bond_xmit_get_slave(struct net_device *master_dev,
					      struct sk_buff *skb,
					      bool all_slaves)
{
	struct bonding *bond = netdev_priv(master_dev);
	struct bond_up_slave *slaves;
	struct slave *slave = NULL;

	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
		slave = bond_xmit_roundrobin_slave_get(bond, skb);
		break;
	case BOND_MODE_ACTIVEBACKUP:
		slave = bond_xmit_activebackup_slave_get(bond, skb);
		break;
	case BOND_MODE_8023AD:
	case BOND_MODE_XOR:
		if (all_slaves)
			slaves = rcu_dereference(bond->all_slaves);
		else
			slaves = rcu_dereference(bond->usable_slaves);
		slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
		break;
	case BOND_MODE_BROADCAST:
		/* broadcast has no single transmit slave */
		break;
	case BOND_MODE_ALB:
		slave = bond_xmit_alb_slave_get(bond, skb);
		break;
	case BOND_MODE_TLB:
		slave = bond_xmit_tlb_slave_get(bond, skb);
		break;
	default:
		/* Should never happen, mode already checked */
		WARN_ONCE(true, "Unknown bonding mode");
		break;
	}

	if (slave)
		return slave->dev;
	return NULL;
}

4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856
/* Populate @flow's L3 addresses and L4 ports from connected socket @sk.
 * An AF_INET6 socket carrying a v4-mapped destination (and not v6-only)
 * falls through to the AF_INET branch, so it hashes like plain IPv4.
 */
static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow)
{
	switch (sk->sk_family) {
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		if (sk->sk_ipv6only ||
		    ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
			flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			flow->addrs.v6addrs.src = inet6_sk(sk)->saddr;
			flow->addrs.v6addrs.dst = sk->sk_v6_daddr;
			break;
		}
		fallthrough;
#endif
	default: /* AF_INET */
		flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		flow->addrs.v4addrs.src = inet_sk(sk)->inet_rcv_saddr;
		flow->addrs.v4addrs.dst = inet_sk(sk)->inet_daddr;
		break;
	}

	flow->ports.src = inet_sk(sk)->inet_sport;
	flow->ports.dst = inet_sk(sk)->inet_dport;
}

/**
 * bond_sk_hash_l34 - generate a hash value based on the socket's L3 and L4 fields
 * @sk: socket to use for headers
 *
 * This function will extract the necessary field from the socket and use
 * them to generate a hash based on the LAYER34 xmit_policy.
 * Assumes that sk is a TCP or UDP socket.
 */
static u32 bond_sk_hash_l34(struct sock *sk)
{
	struct flow_keys flow;
	u32 hash;

	bond_sk_to_flow(sk, &flow);

	/* L4: fold both ports into the hash seed */
	memcpy(&hash, &flow.ports.ports, sizeof(hash));
	/* L3: mix in the addresses via bond_ip_hash() */
	return bond_ip_hash(hash, &flow);
}

/* Pick the lower device for @sk by L3/L4 hash over the usable-slaves
 * array; NULL when the array is absent or empty.  Caller holds RCU.
 */
static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
						  struct sock *sk)
{
	struct bond_up_slave *slaves = rcu_dereference(bond->usable_slaves);
	unsigned int count = slaves ? READ_ONCE(slaves->count) : 0;

	if (unlikely(!count))
		return NULL;

	return slaves->arr[bond_sk_hash_l34(sk) % count]->dev;
}

/* ndo_sk_get_lower_dev: resolve the slave a socket's traffic would use,
 * only for modes/configs where bond_sk_check() allows it.
 */
static struct net_device *bond_sk_get_lower_dev(struct net_device *dev,
						struct sock *sk)
{
	struct bonding *bond = netdev_priv(dev);
	struct net_device *lower;

	rcu_read_lock();
	lower = bond_sk_check(bond) ? __bond_sk_get_lower_dev(bond, sk) : NULL;
	rcu_read_unlock();

	return lower;
}

4857 4858 4859 4860 4861 4862 4863 4864 4865 4866
#if IS_ENABLED(CONFIG_TLS_DEVICE)
/* TLS TX device offload: the skb's socket carries a TLS offload context
 * bound to one specific lower device; the packet must go out through
 * that slave (if still enslaved) or be dropped.
 * NOTE(review): tls_get_ctx(skb->sk)->netdev is read twice and not
 * NULL-checked here - presumably guaranteed non-NULL while the offload
 * is active; verify against the TLS core.
 */
static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb,
					struct net_device *dev)
{
	if (likely(bond_get_slave_by_dev(bond, tls_get_ctx(skb->sk)->netdev)))
		return bond_dev_queue_xmit(bond, skb, tls_get_ctx(skb->sk)->netdev);
	return bond_tx_drop(dev, skb);
}
#endif

4867
/* Transmit dispatch, called under RCU with at least one slave present.
 * Queue-id override and TLS device offload are handled before
 * dispatching on the bonding mode.
 */
static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bonding *bond = netdev_priv(dev);

	/* honour a per-slave queue-id mapping if one matched */
	if (bond_should_override_tx_queue(bond) &&
	    !bond_slave_override(bond, skb))
		return NETDEV_TX_OK;

#if IS_ENABLED(CONFIG_TLS_DEVICE)
	/* TLS-offloaded sockets are pinned to one slave */
	if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
		return bond_tls_device_xmit(bond, skb, dev);
#endif

	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
		return bond_xmit_roundrobin(skb, dev);
	case BOND_MODE_ACTIVEBACKUP:
		return bond_xmit_activebackup(skb, dev);
	case BOND_MODE_8023AD:
	case BOND_MODE_XOR:
		return bond_3ad_xor_xmit(skb, dev);
	case BOND_MODE_BROADCAST:
		return bond_xmit_broadcast(skb, dev);
	case BOND_MODE_ALB:
		return bond_alb_xmit(skb, dev);
	case BOND_MODE_TLB:
		return bond_tlb_xmit(skb, dev);
	default:
		/* Should never happen, mode already checked */
		netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
		WARN_ON_ONCE(1);
		return bond_tx_drop(dev, skb);
	}
}

4902 4903 4904 4905 4906
static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bonding *bond = netdev_priv(dev);
	netdev_tx_t ret = NETDEV_TX_OK;

4907
	/* If we risk deadlock from transmitting this in the
4908 4909
	 * netpoll path, tell netpoll to queue the frame for later tx
	 */
4910
	if (unlikely(is_netpoll_tx_blocked(dev)))
4911 4912
		return NETDEV_TX_BUSY;

4913
	rcu_read_lock();
4914
	if (bond_has_slaves(bond))
4915 4916
		ret = __bond_start_xmit(skb, dev);
	else
4917
		ret = bond_tx_drop(dev, skb);
4918
	rcu_read_unlock();
4919 4920 4921

	return ret;
}
4922

4923 4924 4925 4926 4927 4928 4929 4930 4931 4932
/* Broadcast mode reports the minimum slave speed: fold @slave's speed
 * into the running minimum @speed (0/SPEED_UNKNOWN means "none yet").
 */
static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
{
	if (speed != 0 && speed != SPEED_UNKNOWN)
		return min(speed, slave->speed);

	return slave->speed;
}

4933 4934
/* ethtool ->get_link_ksettings: report the aggregate speed (sum of the
 * tx-capable slaves' speeds, or their minimum in broadcast mode) and
 * the first known duplex.
 */
static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
					   struct ethtool_link_ksettings *cmd)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;
	u32 speed = 0;

	cmd->base.duplex = DUPLEX_UNKNOWN;
	cmd->base.port = PORT_OTHER;

	/* Since bond_slave_can_tx returns false for all inactive or down slaves, we
	 * do not need to check mode.  Though link speed might not represent
	 * the true receive or transmit bandwidth (not all modes are symmetric)
	 * this is an accurate maximum.
	 */
	bond_for_each_slave(bond, slave, iter) {
		if (bond_slave_can_tx(slave)) {
			if (slave->speed != SPEED_UNKNOWN) {
				if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
					speed = bond_mode_bcast_speed(slave,
								      speed);
				else
					speed += slave->speed;
			}
			if (cmd->base.duplex == DUPLEX_UNKNOWN &&
			    slave->duplex != DUPLEX_UNKNOWN)
				cmd->base.duplex = slave->duplex;
		}
	}
	/* speed == 0 means no tx-capable slave reported a speed */
	cmd->base.speed = speed ? : SPEED_UNKNOWN;

	return 0;
}

4968
/* ethtool ->get_drvinfo: report the driver name and the bonding ABI
 * version in the firmware-version field (there is no real firmware).
 */
static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
				     struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
		 BOND_ABI_VERSION);
}

4976
/* ethtool operations exposed by bond master devices */
static const struct ethtool_ops bond_ethtool_ops = {
	.get_drvinfo		= bond_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= bond_ethtool_get_link_ksettings,
};

4982
/* net_device_ops shared by all bonding master devices */
static const struct net_device_ops bond_netdev_ops = {
	.ndo_init		= bond_init,
	.ndo_uninit		= bond_uninit,
	.ndo_open		= bond_open,
	.ndo_stop		= bond_close,
	.ndo_start_xmit		= bond_start_xmit,
	.ndo_select_queue	= bond_select_queue,
	.ndo_get_stats64	= bond_get_stats,
	.ndo_eth_ioctl		= bond_eth_ioctl,
	.ndo_do_ioctl		= bond_do_ioctl,
	.ndo_siocdevprivate	= bond_siocdevprivate,
	.ndo_change_rx_flags	= bond_change_rx_flags,
	.ndo_set_rx_mode	= bond_set_rx_mode,
	.ndo_change_mtu		= bond_change_mtu,
	.ndo_set_mac_address	= bond_set_mac_address,
	.ndo_neigh_setup	= bond_neigh_setup,
	.ndo_vlan_rx_add_vid	= bond_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= bond_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	= bond_netpoll_setup,
	.ndo_netpoll_cleanup	= bond_netpoll_cleanup,
	.ndo_poll_controller	= bond_poll_controller,
#endif
	.ndo_add_slave		= bond_enslave,
	.ndo_del_slave		= bond_release,
	.ndo_fix_features	= bond_fix_features,
	.ndo_features_check	= passthru_features_check,
	.ndo_get_xmit_slave	= bond_xmit_get_slave,
	.ndo_sk_get_lower_dev	= bond_sk_get_lower_dev,
};

5013 5014 5015 5016
/* device type used for sysfs classification (SET_NETDEV_DEVTYPE) */
static const struct device_type bond_type = {
	.name = "bond",
};

5017 5018 5019
static void bond_destructor(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
5020

5021 5022
	if (bond->wq)
		destroy_workqueue(bond->wq);
5023 5024 5025

	if (bond->rr_tx_counter)
		free_percpu(bond->rr_tx_counter);
5026 5027
}

5028
/* One-time initialization of a freshly allocated bond net_device:
 * locks, default parameters, ops, flags and feature bits.
 */
void bond_setup(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	spin_lock_init(&bond->mode_lock);
	bond->params = bonding_defaults;

	/* Initialize pointers */
	bond->dev = bond_dev;

	/* Initialize the device entry points */
	ether_setup(bond_dev);
	bond_dev->max_mtu = ETH_MAX_MTU;
	bond_dev->netdev_ops = &bond_netdev_ops;
	bond_dev->ethtool_ops = &bond_ethtool_ops;

	bond_dev->needs_free_netdev = true;
	bond_dev->priv_destructor = bond_destructor;

	SET_NETDEV_DEVTYPE(bond_dev, &bond_type);

	/* Initialize the device options */
	bond_dev->flags |= IFF_MASTER;
	bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
	bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);

#ifdef CONFIG_XFRM_OFFLOAD
	/* set up xfrm device ops (only supported in active-backup right now) */
	bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
	INIT_LIST_HEAD(&bond->ipsec_list);
	spin_lock_init(&bond->ipsec_lock);
#endif /* CONFIG_XFRM_OFFLOAD */

	/* don't acquire bond device's netif_tx_lock when transmitting */
	bond_dev->features |= NETIF_F_LLTX;

	/* By default, we declare the bond to be fully
	 * VLAN hardware accelerated capable. Special
	 * care is taken in the various xmit functions
	 * when there are slaves that are not hw accel
	 * capable
	 */

	/* Don't allow bond devices to change network namespaces. */
	bond_dev->features |= NETIF_F_NETNS_LOCAL;

	bond_dev->hw_features = BOND_VLAN_FEATURES |
				NETIF_F_HW_VLAN_CTAG_RX |
				NETIF_F_HW_VLAN_CTAG_FILTER;

	bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
	bond_dev->features |= bond_dev->hw_features;
	bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
#ifdef CONFIG_XFRM_OFFLOAD
	bond_dev->hw_features |= BOND_XFRM_FEATURES;
	/* Only enable XFRM features if this is an active-backup config */
	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
		bond_dev->features |= BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
#if IS_ENABLED(CONFIG_TLS_DEVICE)
	if (bond_sk_check(bond))
		bond_dev->features |= BOND_TLS_FEATURES;
#endif
}

5093 5094 5095
/* Destroy a bonding device.
 * Must be under rtnl_lock when this function is called.
 */
5096
static void bond_uninit(struct net_device *bond_dev)
J
Jay Vosburgh 已提交
5097
{
5098
	struct bonding *bond = netdev_priv(bond_dev);
M
Maor Gottlieb 已提交
5099
	struct bond_up_slave *usable, *all;
5100 5101
	struct list_head *iter;
	struct slave *slave;
J
Jay Vosburgh 已提交
5102

5103 5104
	bond_netpoll_cleanup(bond_dev);

5105
	/* Release the bonded slaves */
5106
	bond_for_each_slave(bond, slave, iter)
5107
		__bond_release_one(bond_dev, slave->dev, true, true);
5108
	netdev_info(bond_dev, "Released all slaves\n");
5109

M
Maor Gottlieb 已提交
5110 5111
	usable = rtnl_dereference(bond->usable_slaves);
	if (usable) {
5112
		RCU_INIT_POINTER(bond->usable_slaves, NULL);
M
Maor Gottlieb 已提交
5113 5114 5115 5116 5117 5118 5119
		kfree_rcu(usable, rcu);
	}

	all = rtnl_dereference(bond->all_slaves);
	if (all) {
		RCU_INIT_POINTER(bond->all_slaves, NULL);
		kfree_rcu(all, rcu);
5120 5121
	}

J
Jay Vosburgh 已提交
5122 5123
	list_del(&bond->bond_list);

5124
	bond_debug_unregister(bond);
J
Jay Vosburgh 已提交
5125 5126
}

L
Linus Torvalds 已提交
5127 5128 5129 5130
/*------------------------- Module initialization ---------------------------*/

static int bond_check_params(struct bond_params *params)
{
5131
	int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
5132 5133
	struct bond_opt_value newval;
	const struct bond_opt_value *valptr;
5134
	int arp_all_targets_value = 0;
5135
	u16 ad_actor_sys_prio = 0;
5136
	u16 ad_user_port_key = 0;
5137
	__be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 };
5138 5139 5140 5141
	int arp_ip_count;
	int bond_mode	= BOND_MODE_ROUNDROBIN;
	int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
	int lacp_fast = 0;
5142
	int tlb_dynamic_lb;
5143

5144
	/* Convert string parameters. */
L
Linus Torvalds 已提交
5145
	if (mode) {
5146 5147 5148 5149
		bond_opt_initstr(&newval, mode);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
		if (!valptr) {
			pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
L
Linus Torvalds 已提交
5150 5151
			return -EINVAL;
		}
5152
		bond_mode = valptr->value;
L
Linus Torvalds 已提交
5153 5154
	}

5155
	if (xmit_hash_policy) {
5156 5157 5158
		if (bond_mode == BOND_MODE_ROUNDROBIN ||
		    bond_mode == BOND_MODE_ACTIVEBACKUP ||
		    bond_mode == BOND_MODE_BROADCAST) {
J
Joe Perches 已提交
5159
			pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
J
Joe Perches 已提交
5160
				bond_mode_name(bond_mode));
5161
		} else {
5162 5163 5164 5165
			bond_opt_initstr(&newval, xmit_hash_policy);
			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
						&newval);
			if (!valptr) {
J
Joe Perches 已提交
5166
				pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
5167 5168 5169
				       xmit_hash_policy);
				return -EINVAL;
			}
5170
			xmit_hashtype = valptr->value;
5171 5172 5173
		}
	}

L
Linus Torvalds 已提交
5174 5175
	if (lacp_rate) {
		if (bond_mode != BOND_MODE_8023AD) {
J
Joe Perches 已提交
5176 5177
			pr_info("lacp_rate param is irrelevant in mode %s\n",
				bond_mode_name(bond_mode));
L
Linus Torvalds 已提交
5178
		} else {
5179 5180 5181 5182
			bond_opt_initstr(&newval, lacp_rate);
			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
						&newval);
			if (!valptr) {
J
Joe Perches 已提交
5183
				pr_err("Error: Invalid lacp rate \"%s\"\n",
5184
				       lacp_rate);
L
Linus Torvalds 已提交
5185 5186
				return -EINVAL;
			}
5187
			lacp_fast = valptr->value;
L
Linus Torvalds 已提交
5188 5189 5190
		}
	}

5191
	if (ad_select) {
5192
		bond_opt_initstr(&newval, ad_select);
5193 5194 5195 5196
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
					&newval);
		if (!valptr) {
			pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
5197 5198
			return -EINVAL;
		}
5199 5200
		params->ad_select = valptr->value;
		if (bond_mode != BOND_MODE_8023AD)
5201
			pr_warn("ad_select param only affects 802.3ad mode\n");
5202 5203 5204 5205
	} else {
		params->ad_select = BOND_AD_STABLE;
	}

5206
	if (max_bonds < 0) {
5207 5208
		pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
			max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
L
Linus Torvalds 已提交
5209 5210 5211 5212
		max_bonds = BOND_DEFAULT_MAX_BONDS;
	}

	if (miimon < 0) {
5213 5214
		pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			miimon, INT_MAX);
5215
		miimon = 0;
L
Linus Torvalds 已提交
5216 5217 5218
	}

	if (updelay < 0) {
5219 5220
		pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			updelay, INT_MAX);
L
Linus Torvalds 已提交
5221 5222 5223 5224
		updelay = 0;
	}

	if (downdelay < 0) {
5225 5226
		pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			downdelay, INT_MAX);
L
Linus Torvalds 已提交
5227 5228 5229
		downdelay = 0;
	}

5230 5231
	if ((use_carrier != 0) && (use_carrier != 1)) {
		pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
5232
			use_carrier);
L
Linus Torvalds 已提交
5233 5234 5235
		use_carrier = 1;
	}

5236
	if (num_peer_notif < 0 || num_peer_notif > 255) {
5237 5238
		pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
			num_peer_notif);
5239 5240 5241
		num_peer_notif = 1;
	}

5242
	/* reset values for 802.3ad/TLB/ALB */
5243
	if (!bond_mode_uses_arp(bond_mode)) {
L
Linus Torvalds 已提交
5244
		if (!miimon) {
5245 5246
			pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
			pr_warn("Forcing miimon to 100msec\n");
5247
			miimon = BOND_DEFAULT_MIIMON;
L
Linus Torvalds 已提交
5248 5249 5250
		}
	}

5251
	if (tx_queues < 1 || tx_queues > 255) {
5252 5253
		pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
			tx_queues, BOND_DEFAULT_TX_QUEUES);
5254 5255 5256
		tx_queues = BOND_DEFAULT_TX_QUEUES;
	}

5257
	if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
5258 5259
		pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
			all_slaves_active);
5260 5261 5262
		all_slaves_active = 0;
	}

5263
	if (resend_igmp < 0 || resend_igmp > 255) {
5264 5265
		pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
			resend_igmp, BOND_DEFAULT_RESEND_IGMP);
5266 5267 5268
		resend_igmp = BOND_DEFAULT_RESEND_IGMP;
	}

5269 5270
	bond_opt_initval(&newval, packets_per_slave);
	if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
5271 5272 5273 5274 5275
		pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
			packets_per_slave, USHRT_MAX);
		packets_per_slave = 1;
	}

L
Linus Torvalds 已提交
5276
	if (bond_mode == BOND_MODE_ALB) {
J
Joe Perches 已提交
5277 5278
		pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
			  updelay);
L
Linus Torvalds 已提交
5279 5280 5281 5282 5283 5284 5285
	}

	if (!miimon) {
		if (updelay || downdelay) {
			/* just warn the user the up/down delay will have
			 * no effect since miimon is zero...
			 */
5286 5287
			pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
				updelay, downdelay);
L
Linus Torvalds 已提交
5288 5289 5290 5291
		}
	} else {
		/* don't allow arp monitoring */
		if (arp_interval) {
5292 5293
			pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
				miimon, arp_interval);
L
Linus Torvalds 已提交
5294 5295 5296 5297
			arp_interval = 0;
		}

		if ((updelay % miimon) != 0) {
5298 5299
			pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
				updelay, miimon, (updelay / miimon) * miimon);
L
Linus Torvalds 已提交
5300 5301 5302 5303 5304
		}

		updelay /= miimon;

		if ((downdelay % miimon) != 0) {
5305 5306 5307
			pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
				downdelay, miimon,
				(downdelay / miimon) * miimon);
L
Linus Torvalds 已提交
5308 5309 5310 5311 5312 5313
		}

		downdelay /= miimon;
	}

	if (arp_interval < 0) {
5314 5315
		pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			arp_interval, INT_MAX);
5316
		arp_interval = 0;
L
Linus Torvalds 已提交
5317 5318
	}

5319 5320
	for (arp_ip_count = 0, i = 0;
	     (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
5321
		__be32 ip;
5322 5323

		/* not a complete check, but good enough to catch mistakes */
5324
		if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
5325
		    !bond_is_ip_target_ok(ip)) {
5326 5327
			pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
				arp_ip_target[i]);
L
Linus Torvalds 已提交
5328 5329
			arp_interval = 0;
		} else {
5330 5331 5332
			if (bond_get_targets_ip(arp_target, ip) == -1)
				arp_target[arp_ip_count++] = ip;
			else
5333 5334
				pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
					&ip);
L
Linus Torvalds 已提交
5335 5336 5337 5338 5339
		}
	}

	if (arp_interval && !arp_ip_count) {
		/* don't allow arping if no arp_ip_target given... */
5340 5341
		pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
			arp_interval);
L
Linus Torvalds 已提交
5342 5343 5344
		arp_interval = 0;
	}

5345 5346
	if (arp_validate) {
		if (!arp_interval) {
J
Joe Perches 已提交
5347
			pr_err("arp_validate requires arp_interval\n");
5348 5349 5350
			return -EINVAL;
		}

5351 5352 5353 5354
		bond_opt_initstr(&newval, arp_validate);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
					&newval);
		if (!valptr) {
J
Joe Perches 已提交
5355
			pr_err("Error: invalid arp_validate \"%s\"\n",
5356
			       arp_validate);
5357 5358
			return -EINVAL;
		}
5359 5360
		arp_validate_value = valptr->value;
	} else {
5361
		arp_validate_value = 0;
5362
	}
5363

5364
	if (arp_all_targets) {
5365 5366 5367 5368
		bond_opt_initstr(&newval, arp_all_targets);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
					&newval);
		if (!valptr) {
5369 5370 5371
			pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
			       arp_all_targets);
			arp_all_targets_value = 0;
5372 5373
		} else {
			arp_all_targets_value = valptr->value;
5374 5375 5376
		}
	}

L
Linus Torvalds 已提交
5377
	if (miimon) {
J
Joe Perches 已提交
5378
		pr_info("MII link monitoring set to %d ms\n", miimon);
L
Linus Torvalds 已提交
5379
	} else if (arp_interval) {
5380 5381
		valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
					  arp_validate_value);
J
Joe Perches 已提交
5382
		pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
5383
			arp_interval, valptr->string, arp_ip_count);
L
Linus Torvalds 已提交
5384 5385

		for (i = 0; i < arp_ip_count; i++)
J
Joe Perches 已提交
5386
			pr_cont(" %s", arp_ip_target[i]);
L
Linus Torvalds 已提交
5387

J
Joe Perches 已提交
5388
		pr_cont("\n");
L
Linus Torvalds 已提交
5389

5390
	} else if (max_bonds) {
L
Linus Torvalds 已提交
5391 5392 5393
		/* miimon and arp_interval not set, we need one so things
		 * work as expected, see bonding.txt for details
		 */
J
Joe Perches 已提交
5394
		pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
L
Linus Torvalds 已提交
5395 5396
	}

5397
	if (primary && !bond_mode_uses_primary(bond_mode)) {
L
Linus Torvalds 已提交
5398 5399 5400
		/* currently, using a primary only makes sense
		 * in active backup, TLB or ALB modes
		 */
5401 5402
		pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
			primary, bond_mode_name(bond_mode));
L
Linus Torvalds 已提交
5403 5404 5405
		primary = NULL;
	}

5406
	if (primary && primary_reselect) {
5407 5408 5409 5410
		bond_opt_initstr(&newval, primary_reselect);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
					&newval);
		if (!valptr) {
J
Joe Perches 已提交
5411
			pr_err("Error: Invalid primary_reselect \"%s\"\n",
5412
			       primary_reselect);
5413 5414
			return -EINVAL;
		}
5415
		primary_reselect_value = valptr->value;
5416 5417 5418 5419
	} else {
		primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
	}

5420
	if (fail_over_mac) {
5421 5422 5423 5424
		bond_opt_initstr(&newval, fail_over_mac);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
					&newval);
		if (!valptr) {
J
Joe Perches 已提交
5425
			pr_err("Error: invalid fail_over_mac \"%s\"\n",
5426
			       fail_over_mac);
5427 5428
			return -EINVAL;
		}
5429
		fail_over_mac_value = valptr->value;
5430
		if (bond_mode != BOND_MODE_ACTIVEBACKUP)
5431
			pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
5432 5433 5434
	} else {
		fail_over_mac_value = BOND_FOM_NONE;
	}
5435

5436 5437 5438 5439 5440 5441 5442 5443 5444 5445
	bond_opt_initstr(&newval, "default");
	valptr = bond_opt_parse(
			bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
				     &newval);
	if (!valptr) {
		pr_err("Error: No ad_actor_sys_prio default value");
		return -EINVAL;
	}
	ad_actor_sys_prio = valptr->value;

5446 5447 5448 5449 5450 5451 5452 5453
	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
				&newval);
	if (!valptr) {
		pr_err("Error: No ad_user_port_key default value");
		return -EINVAL;
	}
	ad_user_port_key = valptr->value;

5454 5455 5456 5457 5458
	bond_opt_initstr(&newval, "default");
	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval);
	if (!valptr) {
		pr_err("Error: No tlb_dynamic_lb default value");
		return -EINVAL;
5459
	}
5460
	tlb_dynamic_lb = valptr->value;
5461

5462
	if (lp_interval == 0) {
5463 5464
		pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
			INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
5465 5466 5467
		lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
	}

L
Linus Torvalds 已提交
5468 5469
	/* fill params struct with the proper values */
	params->mode = bond_mode;
5470
	params->xmit_policy = xmit_hashtype;
L
Linus Torvalds 已提交
5471
	params->miimon = miimon;
5472
	params->num_peer_notif = num_peer_notif;
L
Linus Torvalds 已提交
5473
	params->arp_interval = arp_interval;
5474
	params->arp_validate = arp_validate_value;
5475
	params->arp_all_targets = arp_all_targets_value;
L
Linus Torvalds 已提交
5476 5477
	params->updelay = updelay;
	params->downdelay = downdelay;
5478
	params->peer_notif_delay = 0;
L
Linus Torvalds 已提交
5479 5480 5481
	params->use_carrier = use_carrier;
	params->lacp_fast = lacp_fast;
	params->primary[0] = 0;
5482
	params->primary_reselect = primary_reselect_value;
5483
	params->fail_over_mac = fail_over_mac_value;
5484
	params->tx_queues = tx_queues;
5485
	params->all_slaves_active = all_slaves_active;
5486
	params->resend_igmp = resend_igmp;
5487
	params->min_links = min_links;
5488
	params->lp_interval = lp_interval;
5489
	params->packets_per_slave = packets_per_slave;
5490
	params->tlb_dynamic_lb = tlb_dynamic_lb;
5491
	params->ad_actor_sys_prio = ad_actor_sys_prio;
5492
	eth_zero_addr(params->ad_actor_system);
5493
	params->ad_user_port_key = ad_user_port_key;
5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504
	if (packets_per_slave > 0) {
		params->reciprocal_packets_per_slave =
			reciprocal_value(packets_per_slave);
	} else {
		/* reciprocal_packets_per_slave is unused if
		 * packets_per_slave is 0 or 1, just initialize it
		 */
		params->reciprocal_packets_per_slave =
			(struct reciprocal_value) { 0 };
	}

5505 5506
	if (primary)
		strscpy_pad(params->primary, primary, sizeof(params->primary));
L
Linus Torvalds 已提交
5507 5508 5509 5510 5511 5512

	memcpy(params->arp_targets, arp_target, sizeof(arp_target));

	return 0;
}

/* Called from registration process */
static int bond_init(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);

	netdev_dbg(bond_dev, "Begin bond_init\n");

	/* Dedicated ordered workqueue for this bond's periodic monitors;
	 * WQ_MEM_RECLAIM because network devices may sit in the memory
	 * reclaim path.
	 */
	bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
	if (!bond->wq)
		return -ENOMEM;

	/* Round-robin mode keeps a per-CPU tx counter; undo the workqueue
	 * allocation if the per-CPU allocation fails.
	 */
	if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) {
		bond->rr_tx_counter = alloc_percpu(u32);
		if (!bond->rr_tx_counter) {
			destroy_workqueue(bond->wq);
			bond->wq = NULL;
			return -ENOMEM;
		}
	}

	spin_lock_init(&bond->stats_lock);
	netdev_lockdep_set_classes(bond_dev);

	/* Track this bond in its network namespace's device list */
	list_add_tail(&bond->bond_list, &bn->dev_list);

	bond_prepare_sysfs_group(bond);

	bond_debug_register(bond);

	/* Ensure valid dev_addr */
	if (is_zero_ether_addr(bond_dev->dev_addr) &&
	    bond_dev->addr_assign_type == NET_ADDR_PERM)
		eth_hw_addr_random(bond_dev);

	return 0;
}

/* Return the number of tx queues configured via the tx_queues module
 * parameter (validated/clamped by bond_check_params()); used when sizing
 * newly created bond devices.
 */
unsigned int bond_get_num_tx_queues(void)
{
	return tx_queues;
}

/* Create a new bond based on the specified name and bonding parameters.
 * If name is NULL, obtain a suitable "bond%d" name for us.
 * Caller must NOT hold rtnl_lock; we need to release it here before we
 * set up our sysfs entries.
 */
int bond_create(struct net *net, const char *name)
{
	struct net_device *bond_dev;
	struct bonding *bond;
	struct alb_bond_info *bond_info;
	int res;

	rtnl_lock();

	bond_dev = alloc_netdev_mq(sizeof(struct bonding),
				   name ? name : "bond%d", NET_NAME_UNKNOWN,
				   bond_setup, tx_queues);
	if (!bond_dev) {
		pr_err("%s: eek! can't alloc netdev!\n", name);
		rtnl_unlock();
		return -ENOMEM;
	}

	/*
	 * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX.
	 * It is set to 0 by default which is wrong.
	 */
	bond = netdev_priv(bond_dev);
	bond_info = &(BOND_ALB_INFO(bond));
	bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;

	dev_net_set(bond_dev, net);
	bond_dev->rtnl_link_ops = &bond_link_ops;

	res = register_netdevice(bond_dev);
	if (res < 0) {
		/* Not yet registered, so a plain free_netdev() is enough */
		free_netdev(bond_dev);
		rtnl_unlock();

		return res;
	}

	/* Carrier stays off until a slave brings the link up */
	netif_carrier_off(bond_dev);

	bond_work_init_all(bond);

	rtnl_unlock();
	return 0;
}

5606
static int __net_init bond_net_init(struct net *net)
5607
{
5608
	struct bond_net *bn = net_generic(net, bond_net_id);
5609 5610 5611 5612 5613

	bn->net = net;
	INIT_LIST_HEAD(&bn->dev_list);

	bond_create_proc_dir(bn);
5614
	bond_create_sysfs(bn);
5615

5616
	return 0;
5617 5618
}

/* Per-network-namespace teardown: remove sysfs/procfs entries and destroy
 * any bonds still alive in this namespace.
 */
static void __net_exit bond_net_exit(struct net *net)
{
	struct bond_net *bn = net_generic(net, bond_net_id);
	struct bonding *bond, *tmp_bond;
	LIST_HEAD(list);

	/* Sysfs goes first so no new bonds can be created from userspace */
	bond_destroy_sysfs(bn);

	/* Kill off any bonds created after unregistering bond rtnl ops */
	rtnl_lock();
	list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
		unregister_netdevice_queue(bond->dev, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();

	bond_destroy_proc_dir(bn);
}

/* Pernet hooks: give each network namespace its own bond list and
 * proc/sysfs state (sized/indexed via bond_net_id).
 */
static struct pernet_operations bond_net_ops = {
	.init = bond_net_init,
	.exit = bond_net_exit,
	.id   = &bond_net_id,
	.size = sizeof(struct bond_net),
};

/* Module entry point: validate parameters, register pernet/netlink
 * infrastructure, then create max_bonds initial devices.  The goto ladder
 * unwinds in strict reverse order of setup.
 */
static int __init bonding_init(void)
{
	int i;
	int res;

	res = bond_check_params(&bonding_defaults);
	if (res)
		goto out;

	res = register_pernet_subsys(&bond_net_ops);
	if (res)
		goto out;

	res = bond_netlink_init();
	if (res)
		goto err_link;

	bond_create_debugfs();

	/* Create the initial set of bond devices in the init namespace */
	for (i = 0; i < max_bonds; i++) {
		res = bond_create(&init_net, NULL);
		if (res)
			goto err;
	}

	/* Flow dissector used by the encap-aware xmit hash policies */
	skb_flow_dissector_init(&flow_keys_bonding,
				flow_keys_bonding_keys,
				ARRAY_SIZE(flow_keys_bonding_keys));

	register_netdevice_notifier(&bond_netdev_notifier);
out:
	return res;
err:
	bond_destroy_debugfs();
	bond_netlink_fini();
err_link:
	unregister_pernet_subsys(&bond_net_ops);
	goto out;

}

/* Module exit: tear everything down in reverse order of bonding_init() */
static void __exit bonding_exit(void)
{
	unregister_netdevice_notifier(&bond_netdev_notifier);

	bond_destroy_debugfs();

	bond_netlink_fini();
	unregister_pernet_subsys(&bond_net_ops);

#ifdef CONFIG_NET_POLL_CONTROLLER
	/* Make sure we don't have an imbalance on our netpoll blocking */
	WARN_ON(atomic_read(&netpoll_block_tx));
#endif
}

/* Module registration and metadata */
module_init(bonding_init);
module_exit(bonding_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");