br_netlink.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Bridge netlink control interface
 *
 *	Authors:
 *	Stephen Hemminger		<shemminger@osdl.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/if_bridge.h>

#include "br_private.h"
#include "br_private_stp.h"
#include "br_private_tunnel.h"

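/* Count how many bridge_vlan_info entries a compressed VLAN dump will need:
 * consecutive vids that carry identical flags collapse into one
 * RANGE_BEGIN/RANGE_END pair (two entries), while an isolated vid (or the
 * pvid, whose flags differ) costs a single entry.
 */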
static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				u32 filter_mask)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int num_vlans = 0;

	if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	pvid = br_get_pvid(vg);
	/* Count number of vlan infos */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		/* only a context, bridge vlan not activated */
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			if ((vid_range_end - vid_range_start) > 0)
				num_vlans += 2;
			else
				num_vlans += 1;
		}
initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		if ((vid_range_end - vid_range_start) > 0)
			num_vlans += 2;
		else
			num_vlans += 1;
	}

	return num_vlans;
}

static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				 u32 filter_mask)
{
	int num_vlans;

	if (!vg)
		return 0;

	if (filter_mask & RTEXT_FILTER_BRVLAN)
		return vg->num_vlans;

	rcu_read_lock();
	num_vlans = __get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	return num_vlans;
}

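/* Size of the IFLA_AF_SPEC payload for this device, honouring the dump
 * filter: RTEXT_FILTER_BRVLAN accounts for every VLAN,
 * RTEXT_FILTER_BRVLAN_COMPRESSED for collapsed ranges, and ports with
 * BR_VLAN_TUNNEL also reserve room for their tunnel info attributes.
 */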
static size_t br_get_link_af_size_filtered(const struct net_device *dev,
					   u32 filter_mask)
{
	struct net_bridge_vlan_group *vg = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge *br;
	int num_vlan_infos;
	size_t vinfo_sz = 0;

	rcu_read_lock();
	if (netif_is_bridge_port(dev)) {
		p = br_port_get_rcu(dev);
		vg = nbp_vlan_group_rcu(p);
	} else if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
	}
	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	if (p && (p->flags & BR_VLAN_TUNNEL))
		vinfo_sz += br_get_vlan_tunnel_info_size(vg);

	/* Each VLAN is returned in bridge_vlan_info along with flags */
	vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));

	return vinfo_sz;
}

static inline size_t br_port_info_size(void)
{
	return nla_total_size(1)	/* IFLA_BRPORT_STATE  */
		+ nla_total_size(2)	/* IFLA_BRPORT_PRIORITY */
		+ nla_total_size(4)	/* IFLA_BRPORT_COST */
		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */
		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_TO_UCAST */
		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_BCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
		+ nla_total_size(1)	/* IFLA_BRPORT_VLAN_TUNNEL */
		+ nla_total_size(1)	/* IFLA_BRPORT_NEIGH_SUPPRESS */
		+ nla_total_size(1)	/* IFLA_BRPORT_ISOLATED */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_ROOT_ID */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_BRIDGE_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_PORT */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_COST */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_NO */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_CONFIG_PENDING */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MULTICAST_ROUTER */
#endif
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_GROUP_FWD_MASK */
		+ 0;
}

static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
		+ nla_total_size(4) /* IFLA_MASTER */
		+ nla_total_size(4) /* IFLA_MTU */
		+ nla_total_size(4) /* IFLA_LINK */
		+ nla_total_size(1) /* IFLA_OPERSTATE */
		+ nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
		+ nla_total_size(br_get_link_af_size_filtered(dev,
				 filter_mask)) /* IFLA_AF_SPEC */
		+ nla_total_size(4); /* IFLA_BRPORT_BACKUP_PORT */
}

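/* Fill the per-port IFLA_BRPORT_* attributes (flags, STP parameters, timers,
 * optional backup port), used both inside the IFLA_PROTINFO nest and for the
 * slave info dump.
 */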
static int br_port_fill_attrs(struct sk_buff *skb,
			      const struct net_bridge_port *p)
{
	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
	struct net_bridge_port *backup_p;
	u64 timerval;

	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
	    nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROTECT,
		       !!(p->flags & BR_ROOT_BLOCK)) ||
	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
		       !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
		       !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
		       !!(p->flags & BR_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
		       !!(p->flags & BR_MCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD,
		       !!(p->flags & BR_BCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
		       !!(p->flags & BR_PROXYARP_WIFI)) ||
	    nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_root) ||
	    nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_bridge) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
	    nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
		       p->topology_change_ack) ||
	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
	    nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
							BR_VLAN_TUNNEL)) ||
	    nla_put_u16(skb, IFLA_BRPORT_GROUP_FWD_MASK, p->group_fwd_mask) ||
	    nla_put_u8(skb, IFLA_BRPORT_NEIGH_SUPPRESS,
		       !!(p->flags & BR_NEIGH_SUPPRESS)) ||
	    nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)))
		return -EMSGSIZE;

	timerval = br_timer_value(&p->message_age_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->forward_delay_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->hold_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
		       p->multicast_router))
		return -EMSGSIZE;
#endif

	/* we might be called with only br->lock held */
	rcu_read_lock();
	backup_p = rcu_dereference(p->backup_port);
	if (backup_p)
		nla_put_u32(skb, IFLA_BRPORT_BACKUP_PORT,
			    backup_p->dev->ifindex);
	rcu_read_unlock();

	return 0;
}

static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
				    u16 vid_end, u16 flags)
{
	struct  bridge_vlan_info vinfo;

	if ((vid_end - vid_start) > 0) {
		/* add range to skb */
		vinfo.vid = vid_start;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;

		vinfo.vid = vid_end;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	} else {
		vinfo.vid = vid_start;
		vinfo.flags = flags;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

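/* Emit the compressed VLAN list: runs of consecutive vids with identical
 * flags become a single RANGE_BEGIN/RANGE_END pair via
 * br_fill_ifvlaninfo_range(), everything else is emitted as plain entries.
 */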
static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
					 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int err = 0;

	/* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
	 * and mark vlan info with begin and end flags
	 * if vlaninfo represents a range
	 */
	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			err = br_fill_ifvlaninfo_range(skb, vid_range_start,
						       vid_range_end,
						       vid_range_flags);
			if (err)
				return err;
		}

initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		/* Call it once more to send any left over vlans */
		err = br_fill_ifvlaninfo_range(skb, vid_range_start,
					       vid_range_end,
					       vid_range_flags);
		if (err)
			return err;
	}

	return 0;
}

static int br_fill_ifvlaninfo(struct sk_buff *skb,
			      struct net_bridge_vlan_group *vg)
{
	struct bridge_vlan_info vinfo;
	struct net_bridge_vlan *v;
	u16 pvid;

	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;

		vinfo.vid = v->vid;
		vinfo.flags = 0;
		if (v->vid == pvid)
			vinfo.flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

/*
 * Create one netlink message for one interface
 * Contains port and master info as well as carrier and bridge state.
 */
static int br_fill_ifinfo(struct sk_buff *skb,
			  const struct net_bridge_port *port,
			  u32 pid, u32 seq, int event, unsigned int flags,
			  u32 filter_mask, const struct net_device *dev)
{
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
	struct net_bridge *br;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	if (port)
		br = port->br;
	else
		br = netdev_priv(dev);

	br_debug(br, "br_fill_info event %d port %s master %s\n",
		     event, dev->name, br->dev->name);

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);
	hdr->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	if (event == RTM_NEWLINK && port) {
		struct nlattr *nest;

		nest = nla_nest_start(skb, IFLA_PROTINFO);
		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	}

	/* Check if the VID information is requested */
	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
		struct net_bridge_vlan_group *vg;
		struct nlattr *af;
		int err;

		/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
		rcu_read_lock();
		if (port)
			vg = nbp_vlan_group_rcu(port);
		else
			vg = br_vlan_group_rcu(br);

		if (!vg || !vg->num_vlans) {
			rcu_read_unlock();
			goto done;
		}
		af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
		if (!af) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			err = br_fill_ifvlaninfo_compressed(skb, vg);
		else
			err = br_fill_ifvlaninfo(skb, vg);

		if (port && (port->flags & BR_VLAN_TUNNEL))
			err = br_fill_vlan_tunnel_info(skb, vg);
		rcu_read_unlock();
		if (err)
			goto nla_put_failure;
		nla_nest_end(skb, af);
	}

done:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* Notify listeners of a change in bridge or port information */
void br_ifinfo_notify(int event, const struct net_bridge *br,
		      const struct net_bridge_port *port)
{
	u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
	struct net_device *dev;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	struct net *net;
	u16 port_no = 0;

	if (WARN_ON(!port && !br))
		return;

	if (port) {
		dev = port->dev;
		br = port->br;
		port_no = port->port_no;
	} else {
		dev = br->dev;
	}

	net = dev_net(dev);
	br_debug(br, "port %u(%s) event %d\n", port_no, dev->name, event);

	skb = nlmsg_new(br_nlmsg_size(dev, filter), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, dev);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}

/*
 * Dump information about all ports, in response to GETLINK
 */
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
	       struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct net_bridge_port *port = br_port_get_rtnl(dev);

	if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
	    !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
			      filter_mask, dev);
}

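/* Apply a single bridge_vlan_info to a port, or to the bridge itself when
 * p is NULL: RTM_SETLINK adds or updates the VLAN, RTM_DELLINK removes it.
 * *changed is set to true only if the configuration was actually modified.
 */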
static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
			int cmd, struct bridge_vlan_info *vinfo, bool *changed,
			struct netlink_ext_ack *extack)
{
	bool curr_change;
	int err = 0;

	switch (cmd) {
	case RTM_SETLINK:
		if (p) {
			/* if the MASTER flag is set this will act on the global
			 * per-VLAN entry as well
			 */
			err = nbp_vlan_add(p, vinfo->vid, vinfo->flags,
					   &curr_change, extack);
		} else {
			vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			err = br_vlan_add(br, vinfo->vid, vinfo->flags,
					  &curr_change, extack);
		}
		if (curr_change)
			*changed = true;
		break;

	case RTM_DELLINK:
		if (p) {
			if (!nbp_vlan_delete(p, vinfo->vid))
				*changed = true;

			if ((vinfo->flags & BRIDGE_VLAN_INFO_MASTER) &&
			    !br_vlan_delete(p->br, vinfo->vid))
				*changed = true;
		} else if (!br_vlan_delete(br, vinfo->vid)) {
			*changed = true;
		}
		break;
	}

	return err;
}

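/* Handle one IFLA_BRIDGE_VLAN_INFO attribute. A RANGE_BEGIN entry is only
 * remembered in *vinfo_last; the matching RANGE_END then expands the range
 * and invokes br_vlan_info() once for every vid in between.
 */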
int br_process_vlan_info(struct net_bridge *br,
			 struct net_bridge_port *p, int cmd,
			 struct bridge_vlan_info *vinfo_curr,
			 struct bridge_vlan_info **vinfo_last,
			 bool *changed,
			 struct netlink_ext_ack *extack)
{
	if (!br_vlan_valid_id(vinfo_curr->vid, extack))
		return -EINVAL;

	if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
		if (!br_vlan_valid_range(vinfo_curr, *vinfo_last, extack))
			return -EINVAL;
		*vinfo_last = vinfo_curr;
		return 0;
	}

	if (*vinfo_last) {
		struct bridge_vlan_info tmp_vinfo;
		int v, err;

		if (!br_vlan_valid_range(vinfo_curr, *vinfo_last, extack))
			return -EINVAL;

		memcpy(&tmp_vinfo, *vinfo_last,
		       sizeof(struct bridge_vlan_info));
		for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
			tmp_vinfo.vid = v;
			err = br_vlan_info(br, p, cmd, &tmp_vinfo, changed,
					   extack);
			if (err)
				break;
		}
		*vinfo_last = NULL;

		return err;
	}

	return br_vlan_info(br, p, cmd, vinfo_curr, changed, extack);
}

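/* Walk the IFLA_AF_SPEC nest of a SETLINK/DELLINK request and dispatch the
 * VLAN and VLAN tunnel attributes it carries.
 */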
static int br_afspec(struct net_bridge *br,
		     struct net_bridge_port *p,
		     struct nlattr *af_spec,
		     int cmd, bool *changed,
		     struct netlink_ext_ack *extack)
{
	struct bridge_vlan_info *vinfo_curr = NULL;
	struct bridge_vlan_info *vinfo_last = NULL;
	struct nlattr *attr;
	struct vtunnel_info tinfo_last = {};
	struct vtunnel_info tinfo_curr = {};
	int err = 0, rem;

	nla_for_each_nested(attr, af_spec, rem) {
		err = 0;
		switch (nla_type(attr)) {
		case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
			if (!p || !(p->flags & BR_VLAN_TUNNEL))
				return -EINVAL;
			err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
			if (err)
				return err;
			err = br_process_vlan_tunnel_info(br, p, cmd,
							  &tinfo_curr,
							  &tinfo_last,
							  changed);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_VLAN_INFO:
			if (nla_len(attr) != sizeof(struct bridge_vlan_info))
				return -EINVAL;
			vinfo_curr = nla_data(attr);
			err = br_process_vlan_info(br, p, cmd, vinfo_curr,
						   &vinfo_last, changed,
						   extack);
			if (err)
				return err;
			break;
		}
	}

	return err;
}

static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_COST]	= { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY]	= { .type = NLA_U16 },
	[IFLA_BRPORT_MODE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT]	= { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING]	= { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
	[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_BCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_VLAN_TUNNEL] = { .type = NLA_U8 },
	[IFLA_BRPORT_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BRPORT_NEIGH_SUPPRESS] = { .type = NLA_U8 },
	[IFLA_BRPORT_ISOLATED]	= { .type = NLA_U8 },
	[IFLA_BRPORT_BACKUP_PORT] = { .type = NLA_U32 },
};

/* Change the state of the port and notify spanning tree */
static int br_set_port_state(struct net_bridge_port *p, u8 state)
{
	if (state > BR_STATE_BLOCKING)
		return -EINVAL;

	/* if kernel STP is running, don't allow changes */
	if (p->br->stp_enabled == BR_KERNEL_STP)
		return -EBUSY;

	/* if device is not up, change is not allowed
	 * if link is not present, only allowable state is disabled
	 */
	if (!netif_running(p->dev) ||
	    (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
		return -ENETDOWN;

	br_set_state(p, state);
	br_port_state_selection(p->br);
	return 0;
}

/* Set or clear port flags based on the given attribute */
static int br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
			    int attrtype, unsigned long mask)
{
	unsigned long flags;
	int err;

	if (!tb[attrtype])
		return 0;

	if (nla_get_u8(tb[attrtype]))
		flags = p->flags | mask;
	else
		flags = p->flags & ~mask;

	err = br_switchdev_set_port_flag(p, flags, mask);
	if (err)
		return err;

	p->flags = flags;
	return 0;
}

/* Process bridge protocol info on port */
static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
{
	unsigned long old_flags = p->flags;
	bool br_vlan_tunnel_old = false;
	int err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
	if (err)
		return err;

	br_vlan_tunnel_old = (p->flags & BR_VLAN_TUNNEL) ? true : false;
	err = br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
	if (err)
		return err;

	if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
		nbp_vlan_tunnel_info_flush(p);

	if (tb[IFLA_BRPORT_COST]) {
		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_PRIORITY]) {
		err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_STATE]) {
		err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_FLUSH])
		br_fdb_delete_by_port(p->br, p, 0, 0);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
		u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);

		err = br_multicast_set_port_router(p, mcast_router);
		if (err)
			return err;
	}
#endif

	if (tb[IFLA_BRPORT_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(tb[IFLA_BRPORT_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_MACPAUSE)
			return -EINVAL;
		p->group_fwd_mask = fwd_mask;
	}

	err = br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS,
			       BR_NEIGH_SUPPRESS);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_ISOLATED, BR_ISOLATED);
	if (err)
		return err;

	if (tb[IFLA_BRPORT_BACKUP_PORT]) {
		struct net_device *backup_dev = NULL;
		u32 backup_ifindex;

		backup_ifindex = nla_get_u32(tb[IFLA_BRPORT_BACKUP_PORT]);
		if (backup_ifindex) {
			backup_dev = __dev_get_by_index(dev_net(p->dev),
							backup_ifindex);
			if (!backup_dev)
				return -ENOENT;
		}

		err = nbp_backup_change(p, backup_dev);
		if (err)
			return err;
	}

	br_port_flags_change(p, old_flags ^ p->flags);
	return 0;
}

/* Change state and parameters on port. */
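/* For instance, iproute2 requests such as "bridge link set dev <port>
 * learning on" (via the IFLA_PROTINFO nest) or "bridge vlan add dev <port>
 * vid 10" (via IFLA_AF_SPEC) end up here as RTM_SETLINK messages.
 */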
int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
	struct nlattr *tb[IFLA_BRPORT_MAX + 1];
	struct net_bridge_port *p;
	struct nlattr *protinfo;
	struct nlattr *afspec;
	bool changed = false;
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!protinfo && !afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself if the AF_SPEC
	 * is set to see if someone is setting vlan info on the bridge
	 */
	if (!p && !afspec)
		return -EINVAL;

	if (p && protinfo) {
		if (protinfo->nla_type & NLA_F_NESTED) {
			err = nla_parse_nested_deprecated(tb, IFLA_BRPORT_MAX,
							  protinfo,
							  br_port_policy,
							  NULL);
			if (err)
				return err;

			spin_lock_bh(&p->br->lock);
			err = br_setport(p, tb);
			spin_unlock_bh(&p->br->lock);
		} else {
			/* Binary compatibility with old RSTP */
			if (nla_len(protinfo) < sizeof(u8))
				return -EINVAL;

			spin_lock_bh(&p->br->lock);
			err = br_set_port_state(p, nla_get_u8(protinfo));
			spin_unlock_bh(&p->br->lock);
		}
		if (err)
			goto out;
		changed = true;
	}

	if (afspec)
		err = br_afspec(br, p, afspec, RTM_SETLINK, &changed, extack);

	if (changed)
		br_ifinfo_notify(RTM_NEWLINK, br, p);
out:
	return err;
}

/* Delete port information */
int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *afspec;
	bool changed = false;
	int err = 0;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself as well */
	if (!p && !(dev->priv_flags & IFF_EBRIDGE))
		return -EINVAL;

	err = br_afspec(br, p, afspec, RTM_DELLINK, &changed, NULL);
	if (changed)
		/* Send RTM_NEWLINK because userspace
		 * expects RTM_NEWLINK for vlan dels
		 */
		br_ifinfo_notify(RTM_NEWLINK, br, p);

	return err;
}

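/* Validate RTM_NEWLINK/RTM_SETLINK attributes for the bridge device itself:
 * the bridge MAC address, the VLAN protocol and the default pvid.
 */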
static int br_validate(struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return 0;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		switch (nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])) {
		case htons(ETH_P_8021Q):
		case htons(ETH_P_8021AD):
			break;
		default:
			return -EPROTONOSUPPORT;
		}
	}
956 957 958 959 960 961 962

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		if (defpvid >= VLAN_VID_MASK)
			return -EINVAL;
	}
963 964
#endif

965 966 967
	return 0;
}

static int br_port_slave_changelink(struct net_device *brdev,
				    struct net_device *dev,
				    struct nlattr *tb[],
971 972
				    struct nlattr *data[],
				    struct netlink_ext_ack *extack)
973
{
974 975 976
	struct net_bridge *br = netdev_priv(brdev);
	int ret;

977 978
	if (!data)
		return 0;
979 980 981 982 983 984

	spin_lock_bh(&br->lock);
	ret = br_setport(br_port_get_rtnl(dev), data);
	spin_unlock_bh(&br->lock);

	return ret;
985 986
}

987 988 989 990 991 992 993 994 995 996 997 998 999
static int br_port_fill_slave_info(struct sk_buff *skb,
				   const struct net_device *brdev,
				   const struct net_device *dev)
{
	return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
}

static size_t br_port_get_slave_size(const struct net_device *brdev,
				     const struct net_device *dev)
{
	return br_port_info_size();
}

1000 1001 1002 1003
static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
	[IFLA_BR_FORWARD_DELAY]	= { .type = NLA_U32 },
	[IFLA_BR_HELLO_TIME]	= { .type = NLA_U32 },
	[IFLA_BR_MAX_AGE]	= { .type = NLA_U32 },
1004 1005 1006
	[IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
	[IFLA_BR_STP_STATE] = { .type = NLA_U32 },
	[IFLA_BR_PRIORITY] = { .type = NLA_U16 },
1007
	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
1008
	[IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
1009
	[IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
1010 1011
	[IFLA_BR_GROUP_ADDR] = { .type = NLA_BINARY,
				 .len  = ETH_ALEN },
1012
	[IFLA_BR_MCAST_ROUTER] = { .type = NLA_U8 },
1013
	[IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
1014
	[IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
1015
	[IFLA_BR_MCAST_QUERIER] = { .type = NLA_U8 },
1016
	[IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
1017
	[IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
1018
	[IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
1019
	[IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
1020 1021 1022 1023 1024 1025
	[IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
1026 1027 1028
	[IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
1029
	[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
1030
	[IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
1031
	[IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
1032
	[IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
1033
	[IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
1034
	[IFLA_BR_VLAN_STATS_PER_PORT] = { .type = NLA_U8 },
1035 1036
	[IFLA_BR_MULTI_BOOLOPT] = { .type = NLA_EXACT_LEN,
				    .len = sizeof(struct br_boolopt_multi) },
1037 1038 1039
};

static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
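/* Apply the IFLA_BR_* attributes of a (new|change)link request to the bridge.
 * Attributes are processed independently, so a failure part-way through can
 * leave the earlier changes in place.
 */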
1040 1041
			 struct nlattr *data[],
			 struct netlink_ext_ack *extack)
1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066
{
	struct net_bridge *br = netdev_priv(brdev);
	int err;

	if (!data)
		return 0;

	if (data[IFLA_BR_FORWARD_DELAY]) {
		err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_HELLO_TIME]) {
		err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_MAX_AGE]) {
		err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
		if (err)
			return err;
	}

1067
	if (data[IFLA_BR_AGEING_TIME]) {
1068 1069 1070
		err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
		if (err)
			return err;
1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084
	}

	if (data[IFLA_BR_STP_STATE]) {
		u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);

		br_stp_set_enabled(br, stp_enabled);
	}

	if (data[IFLA_BR_PRIORITY]) {
		u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);

		br_stp_set_bridge_priority(br, priority);
	}

	if (data[IFLA_BR_VLAN_FILTERING]) {
		u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);

		err = __br_vlan_filter_toggle(br, vlan_filter);
		if (err)
			return err;
	}

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		__be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);

		err = __br_vlan_set_proto(br, vlan_proto);
		if (err)
			return err;
	}
1101 1102 1103 1104

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

1105
		err = __br_vlan_set_default_pvid(br, defpvid, extack);
1106 1107 1108
		if (err)
			return err;
	}
1109 1110 1111 1112 1113 1114 1115 1116

	if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
		__u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);

		err = br_vlan_set_stats(br, vlan_stats);
		if (err)
			return err;
	}
1117 1118 1119 1120 1121 1122 1123 1124

	if (data[IFLA_BR_VLAN_STATS_PER_PORT]) {
		__u8 per_port = nla_get_u8(data[IFLA_BR_VLAN_STATS_PER_PORT]);

		err = br_vlan_set_stats_per_port(br, per_port);
		if (err)
			return err;
	}
1125 1126
#endif

1127 1128 1129 1130 1131 1132 1133 1134
	if (data[IFLA_BR_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_RESTRICTED)
			return -EINVAL;
		br->group_fwd_mask = fwd_mask;
	}

1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149
	if (data[IFLA_BR_GROUP_ADDR]) {
		u8 new_addr[ETH_ALEN];

		if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
			return -EINVAL;
		memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
		if (!is_link_local_ether_addr(new_addr))
			return -EINVAL;
		if (new_addr[5] == 1 ||		/* 802.3x Pause address */
		    new_addr[5] == 2 ||		/* 802.3ad Slow protocols */
		    new_addr[5] == 3)		/* 802.1X PAE address */
			return -EINVAL;
		spin_lock_bh(&br->lock);
		memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
		spin_unlock_bh(&br->lock);
1150
		br_opt_toggle(br, BROPT_GROUP_ADDR_SET, true);
1151 1152 1153
		br_recalculate_fwd_mask(br);
	}

1154 1155 1156
	if (data[IFLA_BR_FDB_FLUSH])
		br_fdb_flush(br);

1157 1158 1159 1160 1161 1162 1163 1164
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (data[IFLA_BR_MCAST_ROUTER]) {
		u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);

		err = br_multicast_set_router(br, multicast_router);
		if (err)
			return err;
	}
1165 1166 1167 1168

	if (data[IFLA_BR_MCAST_SNOOPING]) {
		u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);

1169
		br_multicast_toggle(br, mcast_snooping);
1170
	}
1171 1172 1173 1174 1175

	if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
		u8 val;

		val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
1176
		br_opt_toggle(br, BROPT_MULTICAST_QUERY_USE_IFADDR, !!val);
1177
	}
1178 1179 1180 1181 1182 1183 1184 1185

	if (data[IFLA_BR_MCAST_QUERIER]) {
		u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);

		err = br_multicast_set_querier(br, mcast_querier);
		if (err)
			return err;
	}
1186

1187 1188 1189
	if (data[IFLA_BR_MCAST_HASH_ELASTICITY])
		br_warn(br, "the hash_elasticity option has been deprecated and is always %u\n",
			RHT_ELASTICITY);
1190

1191 1192
	if (data[IFLA_BR_MCAST_HASH_MAX])
		br->hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);
1193 1194 1195 1196 1197 1198

	if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);

		br->multicast_last_member_count = val;
	}
1199 1200 1201 1202 1203 1204

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);

		br->multicast_startup_query_count = val;
	}
1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240

	if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);

		br->multicast_last_member_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);

		br->multicast_membership_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);

		br->multicast_querier_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);

		br->multicast_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);

		br->multicast_query_response_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);

		br->multicast_startup_query_interval = clock_t_to_jiffies(val);
	}
1241 1242 1243 1244 1245

	if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
		__u8 mcast_stats;

		mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
1246
		br_opt_toggle(br, BROPT_MULTICAST_STATS_ENABLED, !!mcast_stats);
1247
	}
1248 1249 1250 1251 1252 1253 1254 1255 1256

	if (data[IFLA_BR_MCAST_IGMP_VERSION]) {
		__u8 igmp_version;

		igmp_version = nla_get_u8(data[IFLA_BR_MCAST_IGMP_VERSION]);
		err = br_multicast_set_igmp_version(br, igmp_version);
		if (err)
			return err;
	}
1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267

#if IS_ENABLED(CONFIG_IPV6)
	if (data[IFLA_BR_MCAST_MLD_VERSION]) {
		__u8 mld_version;

		mld_version = nla_get_u8(data[IFLA_BR_MCAST_MLD_VERSION]);
		err = br_multicast_set_mld_version(br, mld_version);
		if (err)
			return err;
	}
#endif
1268
#endif
1269 1270 1271 1272
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (data[IFLA_BR_NF_CALL_IPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);

1273
		br_opt_toggle(br, BROPT_NF_CALL_IPTABLES, !!val);
1274 1275 1276 1277 1278
	}

	if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);

1279
		br_opt_toggle(br, BROPT_NF_CALL_IP6TABLES, !!val);
1280 1281 1282 1283 1284
	}

	if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);

1285
		br_opt_toggle(br, BROPT_NF_CALL_ARPTABLES, !!val);
1286 1287
	}
#endif
1288

1289 1290 1291 1292 1293 1294 1295 1296 1297
	if (data[IFLA_BR_MULTI_BOOLOPT]) {
		struct br_boolopt_multi *bm;

		bm = nla_data(data[IFLA_BR_MULTI_BOOLOPT]);
		err = br_boolopt_multi_toggle(br, bm, extack);
		if (err)
			return err;
	}

1298 1299 1300
	return 0;
}

1301
static int br_dev_newlink(struct net *src_net, struct net_device *dev,
1302 1303
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
1304 1305 1306 1307
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

1308 1309 1310 1311
	err = register_netdevice(dev);
	if (err)
		return err;

1312 1313 1314 1315 1316 1317
	if (tb[IFLA_ADDRESS]) {
		spin_lock_bh(&br->lock);
		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
		spin_unlock_bh(&br->lock);
	}

1318
	err = br_changelink(dev, tb, data, extack);
1319
	if (err)
1320 1321
		br_dev_delete(dev, NULL);

1322
	return err;
1323 1324
}

1325 1326 1327 1328 1329
static size_t br_get_size(const struct net_device *brdev)
{
	return nla_total_size(sizeof(u32)) +	/* IFLA_BR_FORWARD_DELAY  */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_HELLO_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MAX_AGE */
1330 1331 1332
	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_AGEING_TIME */
	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_STP_STATE */
	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_PRIORITY */
1333
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_VLAN_FILTERING */
1334 1335
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
1336
	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_VLAN_DEFAULT_PVID */
1337
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_VLAN_STATS_ENABLED */
1338
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_STATS_PER_PORT */
1339
#endif
1340
	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_GROUP_FWD_MASK */
1341
	       nla_total_size(sizeof(struct ifla_bridge_id)) +   /* IFLA_BR_ROOT_ID */
1342
	       nla_total_size(sizeof(struct ifla_bridge_id)) +   /* IFLA_BR_BRIDGE_ID */
1343
	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_ROOT_PORT */
1344
	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_ROOT_PATH_COST */
1345 1346
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_TOPOLOGY_CHANGE */
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
1347 1348 1349 1350
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
1351
	       nla_total_size(ETH_ALEN) +       /* IFLA_BR_GROUP_ADDR */
1352 1353
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_ROUTER */
1354
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_SNOOPING */
1355
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_QUERY_USE_IFADDR */
1356
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_QUERIER */
1357
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_STATS_ENABLED */
1358 1359 1360 1361
	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_HASH_ELASTICITY */
	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_HASH_MAX */
	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_LAST_MEMBER_CNT */
	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
1362 1363 1364 1365 1366 1367
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
1368
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_IGMP_VERSION */
1369
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_MLD_VERSION */
1370 1371 1372 1373 1374
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_IPTABLES */
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_IP6TABLES */
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_ARPTABLES */
1375
#endif
1376
	       nla_total_size(sizeof(struct br_boolopt_multi)) + /* IFLA_BR_MULTI_BOOLOPT */
1377 1378 1379 1380 1381 1382 1383 1384 1385
	       0;
}

static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
{
	struct net_bridge *br = netdev_priv(brdev);
	u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
	u32 hello_time = jiffies_to_clock_t(br->hello_time);
	u32 age_time = jiffies_to_clock_t(br->max_age);
1386 1387 1388
	u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
	u32 stp_enabled = br->stp_enabled;
	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
1389
	u8 vlan_enabled = br_vlan_enabled(br->dev);
1390
	struct br_boolopt_multi bm;
1391 1392 1393
	u64 clockval;

	clockval = br_timer_value(&br->hello_timer);
1394
	if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
1395 1396
		return -EMSGSIZE;
	clockval = br_timer_value(&br->tcn_timer);
1397
	if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
1398 1399
		return -EMSGSIZE;
	clockval = br_timer_value(&br->topology_change_timer);
1400 1401
	if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
			      IFLA_BR_PAD))
1402
		return -EMSGSIZE;
1403
	clockval = br_timer_value(&br->gc_work.timer);
1404
	if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
1405
		return -EMSGSIZE;
1406

1407
	br_boolopt_multi_get(br, &bm);
1408 1409
	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
1410 1411 1412
	    nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
1413
	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
1414
	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
1415 1416 1417 1418 1419
	    nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
	    nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &br->bridge_id) ||
	    nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &br->designated_root) ||
1420
	    nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
1421 1422 1423
	    nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
1424
		       br->topology_change_detected) ||
1425 1426
	    nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr) ||
	    nla_put(skb, IFLA_BR_MULTI_BOOLOPT, sizeof(bm), &bm))
1427 1428
		return -EMSGSIZE;

1429
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
1430
	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
1431
	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
1432
	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
1433 1434
		       br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
1435
		       br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)))
1436 1437
		return -EMSGSIZE;
#endif
1438
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1439
	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
1440 1441
	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING,
		       br_opt_get(br, BROPT_MULTICAST_ENABLED)) ||
1442
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
1443 1444 1445
		       br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR)) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER,
		       br_opt_get(br, BROPT_MULTICAST_QUERIER)) ||
1446
	    nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
1447
		       br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) ||
1448
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY, RHT_ELASTICITY) ||
1449 1450
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
1451 1452
			br->multicast_last_member_count) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
1453 1454 1455
			br->multicast_startup_query_count) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_IGMP_VERSION,
		       br->multicast_igmp_version))
1456
		return -EMSGSIZE;
1457 1458 1459 1460 1461
#if IS_ENABLED(CONFIG_IPV6)
	if (nla_put_u8(skb, IFLA_BR_MCAST_MLD_VERSION,
		       br->multicast_mld_version))
		return -EMSGSIZE;
#endif
1462
	clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
1463 1464
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
			      IFLA_BR_PAD))
1465 1466
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_membership_interval);
1467 1468
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
			      IFLA_BR_PAD))
1469 1470
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_querier_interval);
1471 1472
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
			      IFLA_BR_PAD))
1473 1474
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_interval);
1475 1476
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
1477 1478
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
1479 1480
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
			      IFLA_BR_PAD))
1481 1482
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
1483 1484
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
1485
		return -EMSGSIZE;
1486
#endif
1487 1488
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
1489
		       br_opt_get(br, BROPT_NF_CALL_IPTABLES) ? 1 : 0) ||
1490
	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
1491
		       br_opt_get(br, BROPT_NF_CALL_IP6TABLES) ? 1 : 0) ||
1492
	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
1493
		       br_opt_get(br, BROPT_NF_CALL_ARPTABLES) ? 1 : 0))
1494 1495
		return -EMSGSIZE;
#endif
1496

1497 1498 1499
	return 0;
}

1500
static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
1501
{
1502
	struct net_bridge_port *p = NULL;
1503 1504
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
1505
	struct net_bridge *br;
1506 1507
	int numvls = 0;

1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523
	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return 0;
	}

	if (vg) {
		/* we need to count all, even placeholder entries */
		list_for_each_entry(v, &vg->vlan_list, vlist)
			numvls++;
	}

	return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
	       nla_total_size(sizeof(struct br_mcast_stats)) +
	       nla_total_size(0);
}

static int br_fill_linkxstats(struct sk_buff *skb,
			      const struct net_device *dev,
			      int *prividx, int attr)
{
	struct nlattr *nla __maybe_unused;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	struct nlattr *nest;
	int vl_idx = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return -EINVAL;
1561 1562
	}

1563
	nest = nla_nest_start_noflag(skb, LINK_XSTATS_TYPE_BRIDGE);
1564 1565 1566
	if (!nest)
		return -EMSGSIZE;

1567
	if (vg) {
1568 1569 1570
		u16 pvid;

		pvid = br_get_pvid(vg);
1571 1572 1573 1574 1575 1576 1577 1578
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			struct bridge_vlan_xstats vxi;
			struct br_vlan_stats stats;

			if (++vl_idx < *prividx)
				continue;
			memset(&vxi, 0, sizeof(vxi));
			vxi.vid = v->vid;
1579
			vxi.flags = v->flags;
1580 1581
			if (v->vid == pvid)
				vxi.flags |= BRIDGE_VLAN_INFO_PVID;
1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598
			br_vlan_get_stats(v, &stats);
			vxi.rx_bytes = stats.rx_bytes;
			vxi.rx_packets = stats.rx_packets;
			vxi.tx_bytes = stats.tx_bytes;
			vxi.tx_packets = stats.tx_packets;

			if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
				goto nla_put_failure;
		}
	}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (++vl_idx >= *prividx) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
					sizeof(struct br_mcast_stats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
1599
			goto nla_put_failure;
1600
		br_multicast_get_stats(br, p, nla_data(nla));
1601
	}
1602
#endif
V
	if (p) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_STP,
					sizeof(p->stp_xstats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
			goto nla_put_failure;

		spin_lock_bh(&br->lock);
		memcpy(nla_data(nla), &p->stp_xstats, sizeof(p->stp_xstats));
		spin_unlock_bh(&br->lock);
	}

1616 1617
	nla_nest_end(skb, nest);
	*prividx = 0;
1618

1619 1620 1621 1622 1623 1624 1625 1626
	return 0;

nla_put_failure:
	nla_nest_end(skb, nest);
	*prividx = vl_idx;

	return -EMSGSIZE;
}

static struct rtnl_af_ops br_af_ops __read_mostly = {
	.family			= AF_BRIDGE,
	.get_link_af_size	= br_get_link_af_size_filtered,
};

struct rtnl_link_ops br_link_ops __read_mostly = {
	.kind			= "bridge",
	.priv_size		= sizeof(struct net_bridge),
	.setup			= br_dev_setup,
	.maxtype		= IFLA_BR_MAX,
	.policy			= br_policy,
	.validate		= br_validate,
	.newlink		= br_dev_newlink,
	.changelink		= br_changelink,
	.dellink		= br_dev_delete,
	.get_size		= br_get_size,
	.fill_info		= br_fill_info,
	.fill_linkxstats	= br_fill_linkxstats,
	.get_linkxstats_size	= br_get_linkxstats_size,

	.slave_maxtype		= IFLA_BRPORT_MAX,
	.slave_policy		= br_port_policy,
	.slave_changelink	= br_port_slave_changelink,
	.get_slave_size		= br_port_get_slave_size,
	.fill_slave_info	= br_port_fill_slave_info,
};

int __init br_netlink_init(void)
{
	int err;

	br_mdb_init();
	br_vlan_rtnl_init();
	rtnl_af_register(&br_af_ops);

	err = rtnl_link_register(&br_link_ops);
	if (err)
		goto out_af;

	return 0;

out_af:
	rtnl_af_unregister(&br_af_ops);
	br_mdb_uninit();
	return err;
}

void br_netlink_fini(void)
{
	br_mdb_uninit();
	br_vlan_rtnl_uninit();
	rtnl_af_unregister(&br_af_ops);
	rtnl_link_unregister(&br_link_ops);
}