/*
 *	Bridge netlink control interface
 *
 *	Authors:
 *	Stephen Hemminger		<shemminger@osdl.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/if_bridge.h>

#include "br_private.h"
#include "br_private_stp.h"
#include "br_private_tunnel.h"

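/* Count how many bridge_vlan_info entries a RTEXT_FILTER_BRVLAN_COMPRESSED
 * dump of @vg needs: consecutive vids with identical flags collapse into a
 * range, which is emitted as two entries (range begin + range end).
 */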
static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				u32 filter_mask)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int num_vlans = 0;

	if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	pvid = br_get_pvid(vg);
	/* Count number of vlan infos */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		/* only a context, bridge vlan not activated */
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			if ((vid_range_end - vid_range_start) > 0)
				num_vlans += 2;
			else
				num_vlans += 1;
		}
initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		if ((vid_range_end - vid_range_start) > 0)
			num_vlans += 2;
		else
			num_vlans += 1;
	}

	return num_vlans;
}

static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				 u32 filter_mask)
{
	int num_vlans;

	if (!vg)
		return 0;

	if (filter_mask & RTEXT_FILTER_BRVLAN)
		return vg->num_vlans;

	rcu_read_lock();
	num_vlans = __get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	return num_vlans;
}

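/* Size of the VLAN (and tunnel) payload that br_fill_ifinfo() will emit
 * in IFLA_AF_SPEC for @dev under the given RTEXT_FILTER_* mask.
 */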
static size_t br_get_link_af_size_filtered(const struct net_device *dev,
					   u32 filter_mask)
{
	struct net_bridge_vlan_group *vg = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge *br;
	int num_vlan_infos;
	size_t vinfo_sz = 0;

	rcu_read_lock();
	if (netif_is_bridge_port(dev)) {
		p = br_port_get_rcu(dev);
		vg = nbp_vlan_group_rcu(p);
	} else if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
	}
	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	if (p && (p->flags & BR_VLAN_TUNNEL))
		vinfo_sz += br_get_vlan_tunnel_info_size(vg);

	/* Each VLAN is returned in bridge_vlan_info along with flags */
	vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));

	return vinfo_sz;
}

static inline size_t br_port_info_size(void)
{
	return nla_total_size(1)	/* IFLA_BRPORT_STATE  */
		+ nla_total_size(2)	/* IFLA_BRPORT_PRIORITY */
		+ nla_total_size(4)	/* IFLA_BRPORT_COST */
		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */
		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_TO_UCAST */
		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_BCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
		+ nla_total_size(1)	/* IFLA_BRPORT_VLAN_TUNNEL */
		+ nla_total_size(1)	/* IFLA_BRPORT_NEIGH_SUPPRESS */
		+ nla_total_size(1)	/* IFLA_BRPORT_ISOLATED */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_ROOT_ID */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_BRIDGE_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_PORT */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_COST */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_NO */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_CONFIG_PENDING */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MULTICAST_ROUTER */
#endif
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_GROUP_FWD_MASK */
		+ 0;
}

static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
		+ nla_total_size(4) /* IFLA_MASTER */
		+ nla_total_size(4) /* IFLA_MTU */
		+ nla_total_size(4) /* IFLA_LINK */
		+ nla_total_size(1) /* IFLA_OPERSTATE */
		+ nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
		+ nla_total_size(br_get_link_af_size_filtered(dev,
				 filter_mask)) /* IFLA_AF_SPEC */
		+ nla_total_size(4); /* IFLA_BRPORT_BACKUP_PORT */
}

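/* Fill the per-port IFLA_BRPORT_* attributes (flags, STP state, ids,
 * timers and the backup port, if any) into @skb.
 */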
static int br_port_fill_attrs(struct sk_buff *skb,
			      const struct net_bridge_port *p)
{
	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
	struct net_bridge_port *backup_p;
	u64 timerval;

	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
	    nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROTECT,
		       !!(p->flags & BR_ROOT_BLOCK)) ||
	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
		       !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
		       !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
		       !!(p->flags & BR_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
		       !!(p->flags & BR_MCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD,
		       !!(p->flags & BR_BCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
		       !!(p->flags & BR_PROXYARP_WIFI)) ||
	    nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_root) ||
	    nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_bridge) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
	    nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
		       p->topology_change_ack) ||
	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
	    nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
							BR_VLAN_TUNNEL)) ||
	    nla_put_u16(skb, IFLA_BRPORT_GROUP_FWD_MASK, p->group_fwd_mask) ||
	    nla_put_u8(skb, IFLA_BRPORT_NEIGH_SUPPRESS,
		       !!(p->flags & BR_NEIGH_SUPPRESS)) ||
	    nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)))
		return -EMSGSIZE;

	timerval = br_timer_value(&p->message_age_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->forward_delay_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->hold_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
		       p->multicast_router))
		return -EMSGSIZE;
#endif

	/* we might be called only with br->lock */
	rcu_read_lock();
	backup_p = rcu_dereference(p->backup_port);
	if (backup_p)
		nla_put_u32(skb, IFLA_BRPORT_BACKUP_PORT,
			    backup_p->dev->ifindex);
	rcu_read_unlock();

	return 0;
}

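/* Emit IFLA_BRIDGE_VLAN_INFO for a single vid, or a RANGE_BEGIN/RANGE_END
 * pair when vid_start..vid_end covers more than one vid.
 */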
static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
				    u16 vid_end, u16 flags)
{
	struct  bridge_vlan_info vinfo;

	if ((vid_end - vid_start) > 0) {
		/* add range to skb */
		vinfo.vid = vid_start;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;

		vinfo.vid = vid_end;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	} else {
		vinfo.vid = vid_start;
		vinfo.flags = flags;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
					 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int err = 0;

	/* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
	 * and mark vlan info with begin and end flags
	 * if vlaninfo represents a range
	 */
	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			err = br_fill_ifvlaninfo_range(skb, vid_range_start,
						       vid_range_end,
						       vid_range_flags);
			if (err)
				return err;
		}

initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		/* Call it once more to send any left over vlans */
		err = br_fill_ifvlaninfo_range(skb, vid_range_start,
					       vid_range_end,
					       vid_range_flags);
		if (err)
			return err;
	}

	return 0;
}

static int br_fill_ifvlaninfo(struct sk_buff *skb,
			      struct net_bridge_vlan_group *vg)
{
	struct bridge_vlan_info vinfo;
	struct net_bridge_vlan *v;
	u16 pvid;

	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;

		vinfo.vid = v->vid;
		vinfo.flags = 0;
		if (v->vid == pvid)
			vinfo.flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

/*
 * Create one netlink message for one interface
 * Contains port and master info as well as carrier and bridge state.
 */
static int br_fill_ifinfo(struct sk_buff *skb,
			  const struct net_bridge_port *port,
			  u32 pid, u32 seq, int event, unsigned int flags,
			  u32 filter_mask, const struct net_device *dev)
{
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
	struct net_bridge *br;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	if (port)
		br = port->br;
	else
		br = netdev_priv(dev);

	br_debug(br, "br_fill_info event %d port %s master %s\n",
		     event, dev->name, br->dev->name);

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);
	hdr->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	if (event == RTM_NEWLINK && port) {
		struct nlattr *nest
			= nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);

		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	}

	/* Check if the VID information is requested */
	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
		struct net_bridge_vlan_group *vg;
		struct nlattr *af;
		int err;

		/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
		rcu_read_lock();
		if (port)
			vg = nbp_vlan_group_rcu(port);
		else
			vg = br_vlan_group_rcu(br);

		if (!vg || !vg->num_vlans) {
			rcu_read_unlock();
			goto done;
		}
		af = nla_nest_start(skb, IFLA_AF_SPEC);
		if (!af) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			err = br_fill_ifvlaninfo_compressed(skb, vg);
		else
			err = br_fill_ifvlaninfo(skb, vg);

		if (port && (port->flags & BR_VLAN_TUNNEL))
			err = br_fill_vlan_tunnel_info(skb, vg);
		rcu_read_unlock();
		if (err)
			goto nla_put_failure;
		nla_nest_end(skb, af);
	}

done:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* Notify listeners of a change in bridge or port information */
void br_ifinfo_notify(int event, const struct net_bridge *br,
		      const struct net_bridge_port *port)
{
	u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
	struct net_device *dev;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	struct net *net;
	u16 port_no = 0;

	if (WARN_ON(!port && !br))
		return;

	if (port) {
		dev = port->dev;
		br = port->br;
		port_no = port->port_no;
	} else {
		dev = br->dev;
	}

	net = dev_net(dev);
	br_debug(br, "port %u(%s) event %d\n", port_no, dev->name, event);

	skb = nlmsg_new(br_nlmsg_size(dev, filter), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, dev);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}

/*
 * Dump information about all ports, in response to GETLINK
 */
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
	       struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct net_bridge_port *port = br_port_get_rtnl(dev);

	if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
	    !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
			      filter_mask, dev);
}

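/* Add (RTM_SETLINK) or delete (RTM_DELLINK) one VLAN entry on port @p,
 * or on the bridge itself when @p is NULL; *changed is set to true if
 * the VLAN configuration was actually modified.
 */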
static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
			int cmd, struct bridge_vlan_info *vinfo, bool *changed,
			struct netlink_ext_ack *extack)
{
	bool curr_change;
	int err = 0;

	switch (cmd) {
	case RTM_SETLINK:
		if (p) {
			/* if the MASTER flag is set this will act on the global
			 * per-VLAN entry as well
			 */
			err = nbp_vlan_add(p, vinfo->vid, vinfo->flags,
					   &curr_change, extack);
		} else {
			vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			err = br_vlan_add(br, vinfo->vid, vinfo->flags,
					  &curr_change, extack);
		}
		if (curr_change)
			*changed = true;
		break;

	case RTM_DELLINK:
		if (p) {
			if (!nbp_vlan_delete(p, vinfo->vid))
				*changed = true;

			if ((vinfo->flags & BRIDGE_VLAN_INFO_MASTER) &&
			    !br_vlan_delete(p->br, vinfo->vid))
				*changed = true;
		} else if (!br_vlan_delete(br, vinfo->vid)) {
			*changed = true;
		}
		break;
	}

	return err;
}

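/* Handle one IFLA_BRIDGE_VLAN_INFO attribute, expanding a
 * RANGE_BEGIN/RANGE_END pair into per-vid br_vlan_info() calls.
 */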
static int br_process_vlan_info(struct net_bridge *br,
				struct net_bridge_port *p, int cmd,
				struct bridge_vlan_info *vinfo_curr,
				struct bridge_vlan_info **vinfo_last,
				bool *changed,
				struct netlink_ext_ack *extack)
{
	if (!vinfo_curr->vid || vinfo_curr->vid >= VLAN_VID_MASK)
		return -EINVAL;

	if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
		/* check if we are already processing a range */
		if (*vinfo_last)
			return -EINVAL;
		*vinfo_last = vinfo_curr;
		/* don't allow range of pvids */
		if ((*vinfo_last)->flags & BRIDGE_VLAN_INFO_PVID)
			return -EINVAL;
		return 0;
	}

	if (*vinfo_last) {
		struct bridge_vlan_info tmp_vinfo;
		int v, err;

		if (!(vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_END))
			return -EINVAL;

		if (vinfo_curr->vid <= (*vinfo_last)->vid)
			return -EINVAL;

		memcpy(&tmp_vinfo, *vinfo_last,
		       sizeof(struct bridge_vlan_info));
		for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
			tmp_vinfo.vid = v;
			err = br_vlan_info(br, p, cmd, &tmp_vinfo, changed,
					   extack);
			if (err)
				break;
		}
		*vinfo_last = NULL;

		return err;
	}

	return br_vlan_info(br, p, cmd, vinfo_curr, changed, extack);
}

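/* Walk a nested IFLA_AF_SPEC attribute and apply the contained VLAN and
 * VLAN tunnel info with the given netlink command (@cmd).
 */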
static int br_afspec(struct net_bridge *br,
		     struct net_bridge_port *p,
		     struct nlattr *af_spec,
		     int cmd, bool *changed,
		     struct netlink_ext_ack *extack)
{
	struct bridge_vlan_info *vinfo_curr = NULL;
	struct bridge_vlan_info *vinfo_last = NULL;
	struct nlattr *attr;
	struct vtunnel_info tinfo_last = {};
	struct vtunnel_info tinfo_curr = {};
	int err = 0, rem;

	nla_for_each_nested(attr, af_spec, rem) {
		err = 0;
		switch (nla_type(attr)) {
		case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
			if (!p || !(p->flags & BR_VLAN_TUNNEL))
				return -EINVAL;
			err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
			if (err)
				return err;
			err = br_process_vlan_tunnel_info(br, p, cmd,
							  &tinfo_curr,
							  &tinfo_last,
							  changed);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_VLAN_INFO:
			if (nla_len(attr) != sizeof(struct bridge_vlan_info))
				return -EINVAL;
			vinfo_curr = nla_data(attr);
			err = br_process_vlan_info(br, p, cmd, vinfo_curr,
						   &vinfo_last, changed,
						   extack);
			if (err)
				return err;
			break;
		}
	}

	return err;
}

static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_COST]	= { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY]	= { .type = NLA_U16 },
	[IFLA_BRPORT_MODE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT]	= { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING]	= { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
	[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_BCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_VLAN_TUNNEL] = { .type = NLA_U8 },
	[IFLA_BRPORT_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BRPORT_NEIGH_SUPPRESS] = { .type = NLA_U8 },
	[IFLA_BRPORT_ISOLATED]	= { .type = NLA_U8 },
	[IFLA_BRPORT_BACKUP_PORT] = { .type = NLA_U32 },
};

/* Change the state of the port and notify spanning tree */
static int br_set_port_state(struct net_bridge_port *p, u8 state)
{
	if (state > BR_STATE_BLOCKING)
		return -EINVAL;

	/* if kernel STP is running, don't allow changes */
	if (p->br->stp_enabled == BR_KERNEL_STP)
		return -EBUSY;

	/* if device is not up, change is not allowed
	 * if link is not present, only allowable state is disabled
	 */
	if (!netif_running(p->dev) ||
	    (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
		return -ENETDOWN;

	br_set_state(p, state);
	br_port_state_selection(p->br);
	return 0;
}

/* Set or clear port flags based on attribute */
static int br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
			    int attrtype, unsigned long mask)
{
	unsigned long flags;
	int err;

	if (!tb[attrtype])
		return 0;

	if (nla_get_u8(tb[attrtype]))
		flags = p->flags | mask;
	else
		flags = p->flags & ~mask;

	err = br_switchdev_set_port_flag(p, flags, mask);
	if (err)
		return err;

	p->flags = flags;
	return 0;
}

/* Process bridge protocol info on port */
static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
{
	unsigned long old_flags = p->flags;
	bool br_vlan_tunnel_old = false;
	int err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
	if (err)
		return err;

	br_vlan_tunnel_old = (p->flags & BR_VLAN_TUNNEL) ? true : false;
	err = br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
	if (err)
		return err;

	if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
		nbp_vlan_tunnel_info_flush(p);

	if (tb[IFLA_BRPORT_COST]) {
		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_PRIORITY]) {
		err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_STATE]) {
		err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_FLUSH])
		br_fdb_delete_by_port(p->br, p, 0, 0);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
		u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);

		err = br_multicast_set_port_router(p, mcast_router);
		if (err)
			return err;
	}
#endif

	if (tb[IFLA_BRPORT_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(tb[IFLA_BRPORT_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_MACPAUSE)
			return -EINVAL;
		p->group_fwd_mask = fwd_mask;
	}

	err = br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS,
			       BR_NEIGH_SUPPRESS);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_ISOLATED, BR_ISOLATED);
	if (err)
		return err;

	if (tb[IFLA_BRPORT_BACKUP_PORT]) {
		struct net_device *backup_dev = NULL;
		u32 backup_ifindex;

		backup_ifindex = nla_get_u32(tb[IFLA_BRPORT_BACKUP_PORT]);
		if (backup_ifindex) {
			backup_dev = __dev_get_by_index(dev_net(p->dev),
							backup_ifindex);
			if (!backup_dev)
				return -ENOENT;
		}

		err = nbp_backup_change(p, backup_dev);
		if (err)
			return err;
	}

	br_port_flags_change(p, old_flags ^ p->flags);
	return 0;
}

/* Change state and parameters on port. */
int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
	struct nlattr *tb[IFLA_BRPORT_MAX + 1];
	struct net_bridge_port *p;
	struct nlattr *protinfo;
	struct nlattr *afspec;
	bool changed = false;
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!protinfo && !afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself if the AF_SPEC
	 * is set to see if someone is setting vlan info on the bridge
	 */
	if (!p && !afspec)
		return -EINVAL;

	if (p && protinfo) {
		if (protinfo->nla_type & NLA_F_NESTED) {
			err = nla_parse_nested(tb, IFLA_BRPORT_MAX, protinfo,
					       br_port_policy, NULL);
			if (err)
				return err;

			spin_lock_bh(&p->br->lock);
			err = br_setport(p, tb);
			spin_unlock_bh(&p->br->lock);
		} else {
			/* Binary compatibility with old RSTP */
			if (nla_len(protinfo) < sizeof(u8))
				return -EINVAL;

			spin_lock_bh(&p->br->lock);
			err = br_set_port_state(p, nla_get_u8(protinfo));
			spin_unlock_bh(&p->br->lock);
		}
		if (err)
			goto out;
		changed = true;
	}

	if (afspec)
		err = br_afspec(br, p, afspec, RTM_SETLINK, &changed, extack);

	if (changed)
		br_ifinfo_notify(RTM_NEWLINK, br, p);
out:
	return err;
}

/* Delete port information */
int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *afspec;
	bool changed = false;
	int err = 0;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself as well */
	if (!p && !(dev->priv_flags & IFF_EBRIDGE))
		return -EINVAL;

	err = br_afspec(br, p, afspec, RTM_DELLINK, &changed, NULL);
	if (changed)
		/* Send RTM_NEWLINK because userspace
		 * expects RTM_NEWLINK for vlan dels
		 */
		br_ifinfo_notify(RTM_NEWLINK, br, p);

	return err;
}

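/* Validate IFLA_ADDRESS and the VLAN-related IFLA_BR_* attributes before
 * a bridge device is created or its settings are changed.
 */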
static int br_validate(struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return 0;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		switch (nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])) {
		case htons(ETH_P_8021Q):
		case htons(ETH_P_8021AD):
			break;
		default:
			return -EPROTONOSUPPORT;
		}
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		if (defpvid >= VLAN_VID_MASK)
			return -EINVAL;
	}
#endif

	return 0;
}

static int br_port_slave_changelink(struct net_device *brdev,
				    struct net_device *dev,
				    struct nlattr *tb[],
				    struct nlattr *data[],
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int ret;

	if (!data)
		return 0;

	spin_lock_bh(&br->lock);
	ret = br_setport(br_port_get_rtnl(dev), data);
	spin_unlock_bh(&br->lock);

	return ret;
}

static int br_port_fill_slave_info(struct sk_buff *skb,
				   const struct net_device *brdev,
				   const struct net_device *dev)
{
	return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
}

static size_t br_port_get_slave_size(const struct net_device *brdev,
				     const struct net_device *dev)
{
	return br_port_info_size();
}

static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
	[IFLA_BR_FORWARD_DELAY]	= { .type = NLA_U32 },
	[IFLA_BR_HELLO_TIME]	= { .type = NLA_U32 },
	[IFLA_BR_MAX_AGE]	= { .type = NLA_U32 },
	[IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
	[IFLA_BR_STP_STATE] = { .type = NLA_U32 },
	[IFLA_BR_PRIORITY] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_ADDR] = { .type = NLA_BINARY,
				 .len  = ETH_ALEN },
	[IFLA_BR_MCAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERIER] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_STATS_PER_PORT] = { .type = NLA_U8 },
	[IFLA_BR_MULTI_BOOLOPT] = { .type = NLA_EXACT_LEN,
				    .len = sizeof(struct br_boolopt_multi) },
};

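/* Apply IFLA_BR_* attributes to a bridge device; also called from
 * br_dev_newlink() right after the new device is registered.
 */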
static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
			 struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int err;

	if (!data)
		return 0;

	if (data[IFLA_BR_FORWARD_DELAY]) {
		err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_HELLO_TIME]) {
		err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_MAX_AGE]) {
		err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_AGEING_TIME]) {
		err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_STP_STATE]) {
		u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);

		br_stp_set_enabled(br, stp_enabled);
	}

	if (data[IFLA_BR_PRIORITY]) {
		u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);

		br_stp_set_bridge_priority(br, priority);
	}

	if (data[IFLA_BR_VLAN_FILTERING]) {
		u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);

		err = __br_vlan_filter_toggle(br, vlan_filter);
		if (err)
			return err;
	}

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		__be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);

		err = __br_vlan_set_proto(br, vlan_proto);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		err = __br_vlan_set_default_pvid(br, defpvid, extack);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
		__u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);

		err = br_vlan_set_stats(br, vlan_stats);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_STATS_PER_PORT]) {
		__u8 per_port = nla_get_u8(data[IFLA_BR_VLAN_STATS_PER_PORT]);

		err = br_vlan_set_stats_per_port(br, per_port);
		if (err)
			return err;
	}
#endif

	if (data[IFLA_BR_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_RESTRICTED)
			return -EINVAL;
		br->group_fwd_mask = fwd_mask;
	}

	if (data[IFLA_BR_GROUP_ADDR]) {
		u8 new_addr[ETH_ALEN];

		if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
			return -EINVAL;
		memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
		if (!is_link_local_ether_addr(new_addr))
			return -EINVAL;
		if (new_addr[5] == 1 ||		/* 802.3x Pause address */
		    new_addr[5] == 2 ||		/* 802.3ad Slow protocols */
		    new_addr[5] == 3)		/* 802.1X PAE address */
			return -EINVAL;
		spin_lock_bh(&br->lock);
		memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
		spin_unlock_bh(&br->lock);
		br_opt_toggle(br, BROPT_GROUP_ADDR_SET, true);
		br_recalculate_fwd_mask(br);
	}

	if (data[IFLA_BR_FDB_FLUSH])
		br_fdb_flush(br);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (data[IFLA_BR_MCAST_ROUTER]) {
		u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);

		err = br_multicast_set_router(br, multicast_router);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_SNOOPING]) {
		u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);

		br_multicast_toggle(br, mcast_snooping);
	}

	if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
		u8 val;

		val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
		br_opt_toggle(br, BROPT_MULTICAST_QUERY_USE_IFADDR, !!val);
	}

	if (data[IFLA_BR_MCAST_QUERIER]) {
		u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);

		err = br_multicast_set_querier(br, mcast_querier);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_HASH_ELASTICITY])
		br_warn(br, "the hash_elasticity option has been deprecated and is always %u\n",
			RHT_ELASTICITY);

	if (data[IFLA_BR_MCAST_HASH_MAX])
		br->hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);

	if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);

		br->multicast_last_member_count = val;
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);

		br->multicast_startup_query_count = val;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);

		br->multicast_last_member_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);

		br->multicast_membership_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);

		br->multicast_querier_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);

		br->multicast_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);

		br->multicast_query_response_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);

		br->multicast_startup_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
		__u8 mcast_stats;

		mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
		br_opt_toggle(br, BROPT_MULTICAST_STATS_ENABLED, !!mcast_stats);
	}

	if (data[IFLA_BR_MCAST_IGMP_VERSION]) {
		__u8 igmp_version;

		igmp_version = nla_get_u8(data[IFLA_BR_MCAST_IGMP_VERSION]);
		err = br_multicast_set_igmp_version(br, igmp_version);
		if (err)
			return err;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (data[IFLA_BR_MCAST_MLD_VERSION]) {
		__u8 mld_version;

		mld_version = nla_get_u8(data[IFLA_BR_MCAST_MLD_VERSION]);
		err = br_multicast_set_mld_version(br, mld_version);
		if (err)
			return err;
	}
#endif
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (data[IFLA_BR_NF_CALL_IPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);

		br_opt_toggle(br, BROPT_NF_CALL_IPTABLES, !!val);
	}

	if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);

		br_opt_toggle(br, BROPT_NF_CALL_IP6TABLES, !!val);
	}

	if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);

		br_opt_toggle(br, BROPT_NF_CALL_ARPTABLES, !!val);
	}
#endif

	if (data[IFLA_BR_MULTI_BOOLOPT]) {
		struct br_boolopt_multi *bm;

		bm = nla_data(data[IFLA_BR_MULTI_BOOLOPT]);
		err = br_boolopt_multi_toggle(br, bm, extack);
		if (err)
			return err;
	}

	return 0;
}

static int br_dev_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	err = register_netdevice(dev);
	if (err)
		return err;

	if (tb[IFLA_ADDRESS]) {
		spin_lock_bh(&br->lock);
		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
		spin_unlock_bh(&br->lock);
	}

	err = br_changelink(dev, tb, data, extack);
	if (err)
		br_dev_delete(dev, NULL);

	return err;
}

static size_t br_get_size(const struct net_device *brdev)
{
	return nla_total_size(sizeof(u32)) +	/* IFLA_BR_FORWARD_DELAY  */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_HELLO_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MAX_AGE */
	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_AGEING_TIME */
	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_STP_STATE */
	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_PRIORITY */
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_VLAN_FILTERING */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_VLAN_DEFAULT_PVID */
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_VLAN_STATS_ENABLED */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_STATS_PER_PORT */
#endif
	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_GROUP_FWD_MASK */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +   /* IFLA_BR_ROOT_ID */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +   /* IFLA_BR_BRIDGE_ID */
	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_ROOT_PORT */
	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_ROOT_PATH_COST */
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_TOPOLOGY_CHANGE */
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
	       nla_total_size(ETH_ALEN) +       /* IFLA_BR_GROUP_ADDR */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_ROUTER */
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_SNOOPING */
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_QUERY_USE_IFADDR */
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_QUERIER */
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_STATS_ENABLED */
	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_HASH_ELASTICITY */
	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_HASH_MAX */
	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_LAST_MEMBER_CNT */
	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_IGMP_VERSION */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_MLD_VERSION */
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_IPTABLES */
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_IP6TABLES */
	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_ARPTABLES */
#endif
	       nla_total_size(sizeof(struct br_boolopt_multi)) + /* IFLA_BR_MULTI_BOOLOPT */
	       0;
}

static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
{
	struct net_bridge *br = netdev_priv(brdev);
	u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
	u32 hello_time = jiffies_to_clock_t(br->hello_time);
	u32 age_time = jiffies_to_clock_t(br->max_age);
	u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
	u32 stp_enabled = br->stp_enabled;
	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
	u8 vlan_enabled = br_vlan_enabled(br->dev);
	struct br_boolopt_multi bm;
	u64 clockval;

	clockval = br_timer_value(&br->hello_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->tcn_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->topology_change_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->gc_work.timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;

	br_boolopt_multi_get(br, &bm);
	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
	    nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
	    nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
	    nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &br->bridge_id) ||
	    nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &br->designated_root) ||
	    nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
	    nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
		       br->topology_change_detected) ||
	    nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr) ||
	    nla_put(skb, IFLA_BR_MULTI_BOOLOPT, sizeof(bm), &bm))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
		       br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
		       br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)))
		return -EMSGSIZE;
#endif
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING,
		       br_opt_get(br, BROPT_MULTICAST_ENABLED)) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
		       br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR)) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER,
		       br_opt_get(br, BROPT_MULTICAST_QUERIER)) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
		       br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY, RHT_ELASTICITY) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
			br->multicast_last_member_count) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
			br->multicast_startup_query_count) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_IGMP_VERSION,
		       br->multicast_igmp_version))
		return -EMSGSIZE;
#if IS_ENABLED(CONFIG_IPV6)
	if (nla_put_u8(skb, IFLA_BR_MCAST_MLD_VERSION,
		       br->multicast_mld_version))
		return -EMSGSIZE;
#endif
	clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_membership_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_querier_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
		       br_opt_get(br, BROPT_NF_CALL_IPTABLES) ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
		       br_opt_get(br, BROPT_NF_CALL_IP6TABLES) ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
		       br_opt_get(br, BROPT_NF_CALL_ARPTABLES) ? 1 : 0))
		return -EMSGSIZE;
#endif

	return 0;
}

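/* Worst-case size of the LINK_XSTATS_TYPE_BRIDGE dump: one entry per VLAN
 * (including placeholder contexts) plus the multicast statistics blob.
 */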
static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
{
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int numvls = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return 0;
	}

	if (vg) {
		/* we need to count all, even placeholder entries */
		list_for_each_entry(v, &vg->vlan_list, vlist)
			numvls++;
	}

	return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
	       nla_total_size(sizeof(struct br_mcast_stats)) +
	       nla_total_size(0);
}

static int br_fill_linkxstats(struct sk_buff *skb,
			      const struct net_device *dev,
			      int *prividx, int attr)
{
	struct nlattr *nla __maybe_unused;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	struct nlattr *nest;
	int vl_idx = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return -EINVAL;
	}

	nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE);
	if (!nest)
		return -EMSGSIZE;

	if (vg) {
		u16 pvid;

		pvid = br_get_pvid(vg);
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			struct bridge_vlan_xstats vxi;
			struct br_vlan_stats stats;

			if (++vl_idx < *prividx)
				continue;
			memset(&vxi, 0, sizeof(vxi));
			vxi.vid = v->vid;
			vxi.flags = v->flags;
			if (v->vid == pvid)
				vxi.flags |= BRIDGE_VLAN_INFO_PVID;
			br_vlan_get_stats(v, &stats);
			vxi.rx_bytes = stats.rx_bytes;
			vxi.rx_packets = stats.rx_packets;
			vxi.tx_bytes = stats.tx_bytes;
			vxi.tx_packets = stats.tx_packets;

			if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
				goto nla_put_failure;
		}
	}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (++vl_idx >= *prividx) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
					sizeof(struct br_mcast_stats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
			goto nla_put_failure;
		br_multicast_get_stats(br, p, nla_data(nla));
	}
#endif
	nla_nest_end(skb, nest);
	*prividx = 0;

	return 0;

nla_put_failure:
	nla_nest_end(skb, nest);
	*prividx = vl_idx;

	return -EMSGSIZE;
}

static struct rtnl_af_ops br_af_ops __read_mostly = {
	.family			= AF_BRIDGE,
	.get_link_af_size	= br_get_link_af_size_filtered,
};

struct rtnl_link_ops br_link_ops __read_mostly = {
	.kind			= "bridge",
	.priv_size		= sizeof(struct net_bridge),
	.setup			= br_dev_setup,
	.maxtype		= IFLA_BR_MAX,
	.policy			= br_policy,
	.validate		= br_validate,
	.newlink		= br_dev_newlink,
	.changelink		= br_changelink,
	.dellink		= br_dev_delete,
	.get_size		= br_get_size,
	.fill_info		= br_fill_info,
	.fill_linkxstats	= br_fill_linkxstats,
	.get_linkxstats_size	= br_get_linkxstats_size,

	.slave_maxtype		= IFLA_BRPORT_MAX,
	.slave_policy		= br_port_policy,
	.slave_changelink	= br_port_slave_changelink,
	.get_slave_size		= br_port_get_slave_size,
	.fill_slave_info	= br_port_fill_slave_info,
};

int __init br_netlink_init(void)
{
	int err;

	br_mdb_init();
	rtnl_af_register(&br_af_ops);

	err = rtnl_link_register(&br_link_ops);
	if (err)
		goto out_af;

	return 0;

out_af:
	rtnl_af_unregister(&br_af_ops);
	br_mdb_uninit();
	return err;
}

void br_netlink_fini(void)
{
	br_mdb_uninit();
	rtnl_af_unregister(&br_af_ops);
	rtnl_link_unregister(&br_link_ops);
}