// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>

#include "br_private.h"
#include "br_private_tunnel.h"

static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);

static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
			      const void *ptr)
{
	const struct net_bridge_vlan *vle = ptr;
	u16 vid = *(u16 *)arg->key;

	return vle->vid != vid;
}

static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.max_size = VLAN_N_VID,
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};

static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}

static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg,
			    const struct net_bridge_vlan *v)
{
	if (vg->pvid == v->vid)
		return false;

	smp_wmb();
	br_vlan_set_pvid_state(vg, v->state);
	vg->pvid = v->vid;

	return true;
}

static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return false;

	smp_wmb();
	vg->pvid = 0;

	return true;
}

/* return true if anything changed, false otherwise */
static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan_group *vg;
	u16 old_flags = v->flags;
	bool ret;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	if (flags & BRIDGE_VLAN_INFO_PVID)
		ret = __vlan_add_pvid(vg, v);
	else
		ret = __vlan_delete_pvid(vg, v->vid);

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	else
		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;

	return ret || !!(old_flags ^ v->flags);
}

static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  struct net_bridge_vlan *v, u16 flags,
			  struct netlink_ext_ack *extack)
{
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q add.
	 */
	err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
	if (err == -EOPNOTSUPP)
		return vlan_vid_add(dev, br->vlan_proto, v->vid);
	v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
	return err;
}

static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan_group *vg;
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	headp = &vg->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid < vent->vid)
			continue;
		else
			break;
	}
	list_add_rcu(&v->vlist, hpos);
}

static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}

static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  const struct net_bridge_vlan *v)
{
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q del.
	 */
	err = br_switchdev_port_vlan_del(dev, v->vid);
	if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
		vlan_vid_del(dev, br->vlan_proto, v->vid);
	return err == -EOPNOTSUPP ? 0 : err;
}

/* Returns the master vlan; if it didn't exist it gets created. In all cases
 * a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *
br_vlan_get_master(struct net_bridge *br, u16 vid,
		   struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		bool changed;

		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0, &changed, extack))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
		refcount_set(&masterv->refcnt, 1);
		return masterv;
	}
	refcount_inc(&masterv->refcnt);

	return masterv;
}

static void br_master_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(!br_vlan_is_master(v));
	free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}

static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (refcount_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
	}
}

static void nbp_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(br_vlan_is_master(v));
	/* if we had per-port stats configured then free them here */
	if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
		free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}

/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
 * 2. vlan is being added on a bridge (both master and brentry flags)
 * 3. vlan is being added on a port, but a global entry didn't exist which
 *    is being created right now (master flag set, brentry flag unset), the
 *    global entry is used for global per-vlan features, but not for filtering
 * 4. same as 3 but with both master and brentry flags set so the entry
 *    will be used for filtering in both the port and the bridge
 */
static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
		      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *masterv = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_device *dev;
	struct net_bridge *br;
	int err;

	if (br_vlan_is_master(v)) {
		br = v->br;
		dev = br->dev;
		vg = br_vlan_group(br);
	} else {
		p = v->port;
		br = p->br;
		dev = p->dev;
		vg = nbp_vlan_group(p);
	}

	if (p) {
		/* Add VLAN to the device filter if it is supported.
		 * This ensures tagged traffic enters the bridge when
		 * promiscuous mode is disabled by br_manage_promisc().
		 */
		err = __vlan_vid_add(dev, br, v, flags, extack);
		if (err)
			goto out;

		/* need to work on the master vlan too */
		if (flags & BRIDGE_VLAN_INFO_MASTER) {
			bool changed;

			err = br_vlan_add(br, v->vid,
					  flags | BRIDGE_VLAN_INFO_BRENTRY,
					  &changed, extack);
			if (err)
				goto out_filt;

			if (changed)
				br_vlan_notify(br, NULL, v->vid, 0,
					       RTM_NEWVLAN);
		}

		masterv = br_vlan_get_master(br, v->vid, extack);
		if (!masterv) {
			err = -ENOMEM;
			goto out_filt;
		}
		v->brvlan = masterv;
		if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
			v->stats =
			     netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
			if (!v->stats) {
				err = -ENOMEM;
				goto out_filt;
			}
			v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
		} else {
			v->stats = masterv->stats;
		}
	} else {
		err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
		if (err && err != -EOPNOTSUPP)
			goto out;
	}

	/* Add the dev mac and count the vlan only if it's usable */
	if (br_vlan_should_use(v)) {
		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto out_filt;
		}
		vg->num_vlans++;
	}

	/* set the state before publishing */
	v->state = BR_STATE_FORWARDING;

	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
					    br_vlan_rht_params);
	if (err)
		goto out_fdb_insert;

	__vlan_add_list(v);
	__vlan_add_flags(v, flags);

	if (p)
		nbp_vlan_set_vlan_dev_state(p, v->vid);
out:
	return err;

out_fdb_insert:
	if (br_vlan_should_use(v)) {
		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
		vg->num_vlans--;
	}

out_filt:
	if (p) {
		__vlan_vid_del(dev, br, v);
		if (masterv) {
			if (v->stats && masterv->stats != v->stats)
				free_percpu(v->stats);
			v->stats = NULL;

			br_vlan_put_master(masterv);
			v->brvlan = NULL;
		}
	} else {
		br_switchdev_port_vlan_del(dev, v->vid);
	}

	goto out;
}

static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v);
		if (err)
			goto out;
	} else {
		err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
		if (err && err != -EOPNOTSUPP)
			goto out;
		err = 0;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	if (masterv != v) {
		vlan_tunnel_info_del(vg, v);
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		nbp_vlan_set_vlan_dev_state(p, v->vid);
		call_rcu(&v->rcu, nbp_vlan_rcu_free);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}

static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	vlan_tunnel_deinit(vg);
	kfree(vg);
}

static void __vlan_flush(const struct net_bridge *br,
			 const struct net_bridge_port *p,
			 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;
	u16 v_start = 0, v_end = 0;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) {
		/* take care of disjoint ranges */
		if (!v_start) {
			v_start = vlan->vid;
		} else if (vlan->vid - v_end != 1) {
			/* found range end, notify and start next one */
			br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
			v_start = vlan->vid;
		}
		v_end = vlan->vid;

		__vlan_del(vlan);
	}

	/* notify about the last/whole vlan range */
	if (v_start)
		br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
}

struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       const struct net_bridge_port *p,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats;
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id.  If the vlan id has untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* Vlan entry must be configured at this point.  The
	 * only exception is when the bridge is set in promisc mode and the
	 * packet is destined for the bridge device.  In this case
	 * pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
		__vlan_hwaccel_clear_tag(skb);

	if (p && (p->flags & BR_VLAN_TUNNEL) &&
	    br_handle_egress_vlan_tunnel(skb, v)) {
		kfree_skb(skb);
		return NULL;
	}
out:
	return skb;
}

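/* Ingress classification sketch (illustrative): an untagged frame arriving
 * on a port whose pvid is 10 is assigned to vlan 10 and gets a synthetic
 * tag via __vlan_hwaccel_put_tag(); a priority-tagged frame (VID 0) keeps
 * its PCP bits and only has the VID field rewritten to the pvid.
 */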
/* Called under RCU */
static bool __allowed_ingress(const struct net_bridge *br,
			      struct net_bridge_vlan_group *vg,
			      struct sk_buff *skb, u16 *vid,
			      u8 *state)
{
	struct pcpu_sw_netstats *stats;
	struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == br->vlan_proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != br->vlan_proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port.  That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port.  Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, we know that skb->vlan_tci VID
			 * field was 0.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

		/* if stats are disabled we can avoid the lookup */
		if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
			if (*state == BR_STATE_FORWARDING) {
				*state = br_vlan_get_pvid_state(vg);
				return br_vlan_state_allowed(*state, true);
			} else {
				return true;
			}
		}
	}
	v = br_vlan_find(vg, *vid);
	if (!v || !br_vlan_should_use(v))
		goto drop;

	if (*state == BR_STATE_FORWARDING) {
		*state = br_vlan_get_state(v);
		if (!br_vlan_state_allowed(*state, true))
			goto drop;
	}

	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->rx_bytes += skb->len;
		stats->rx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	return true;

drop:
	kfree_skb(skb);
	return false;
}

bool br_allowed_ingress(const struct net_bridge *br,
			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
			u16 *vid, u8 *state)
{
	/* If VLAN filtering is disabled on the bridge, all packets are
	 * permitted.
	 */
	if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
		return true;
	}

	return __allowed_ingress(br, vg, skb, vid, state);
}

/* Called under RCU. */
bool br_allowed_egress(struct net_bridge_vlan_group *vg,
		       const struct sk_buff *skb)
{
	const struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		return true;

	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	if (v && br_vlan_should_use(v) &&
	    br_vlan_state_allowed(br_vlan_get_state(v), false))
		return true;

	return false;
}

/* Called under RCU */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge *br = p->br;
	struct net_bridge_vlan *v;

	/* If filtering was disabled at input, let it pass. */
	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
		return true;

	vg = nbp_vlan_group_rcu(p);
	if (!vg || !vg->num_vlans)
		return false;

	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
		*vid = 0;

	if (!*vid) {
		*vid = br_get_pvid(vg);
		if (!*vid ||
		    !br_vlan_state_allowed(br_vlan_get_pvid_state(vg), true))
			return false;

		return true;
	}

	v = br_vlan_find(vg, *vid);
	if (v && br_vlan_state_allowed(br_vlan_get_state(v), true))
		return true;

	return false;
}

static int br_vlan_add_existing(struct net_bridge *br,
				struct net_bridge_vlan_group *vg,
				struct net_bridge_vlan *vlan,
				u16 flags, bool *changed,
				struct netlink_ext_ack *extack)
{
	int err;

	err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	if (!br_vlan_is_brentry(vlan)) {
		/* Trying to change flags of non-existent bridge vlan */
		if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
			err = -EINVAL;
			goto err_flags;
		}
		/* It was only kept for port vlans, now make it real */
		err = br_fdb_insert(br, NULL, br->dev->dev_addr,
				    vlan->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto err_fdb_insert;
		}

		refcount_inc(&vlan->refcnt);
		vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans++;
		*changed = true;
	}

	if (__vlan_add_flags(vlan, flags))
		*changed = true;

	return 0;

err_fdb_insert:
err_flags:
	br_switchdev_port_vlan_del(br->dev, vlan->vid);
	return err;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
		struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan)
		return br_vlan_add_existing(br, vg, vlan, flags, changed,
					    extack);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!vlan->stats) {
		kfree(vlan);
		return -ENOMEM;
	}
	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		refcount_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags, extack);
	if (ret) {
		free_percpu(vlan->stats);
		kfree(vlan);
	} else {
		*changed = true;
	}

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
	br_fdb_delete_by_port(br, NULL, vid, 0);

	vlan_tunnel_info_del(vg, v);

	return __vlan_del(v);
}

void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(br, NULL, vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}

struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (!vg)
		return NULL;

	return br_vlan_lookup(&vg->vlan_hash, vid);
}

/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
	if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
		return;

	spin_lock_bh(&br->lock);
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q)) {
		/* Bridge Group Address */
		br->group_addr[5] = 0x00;
	} else { /* vlan_enabled && ETH_P_8021AD */
		/* Provider Bridge Group Address */
		br->group_addr[5] = 0x08;
	}
	spin_unlock_bh(&br->lock);
}

/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q))
		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
	else /* vlan_enabled && ETH_P_8021AD */
		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
					      ~(1u << br->group_addr[5]);
}

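/* Toggle vlan filtering on the bridge. Illustrative usage (assuming
 * iproute2): "ip link set dev br0 type bridge vlan_filtering 1". The
 * switchdev attribute is offered to the hardware first, so a driver that
 * cannot honour the new setting can veto the change.
 */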
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);
	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	return 0;
}

bool br_vlan_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_opt_get(br, BROPT_VLAN_ENABLED);
}
EXPORT_SYMBOL_GPL(br_vlan_enabled);

int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
{
	struct net_bridge *br = netdev_priv(dev);

	*p_proto = ntohs(br->vlan_proto);

	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_proto);

int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_protocol = ntohs(proto),
	};
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto = br->vlan_proto;

	if (br->vlan_proto == proto)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
	}

	return 0;

err_filt:
	attr.u.vlan_protocol = ntohs(oldproto);
	switchdev_port_attr_set(br->dev, &attr);

	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
	}

	return err;
}

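/* Entry point used by the bridge's sysfs vlan_protocol attribute; the
 * equivalent netlink option (illustratively, iproute2's "ip link set dev
 * br0 type bridge vlan_protocol 802.1ad") reaches __br_vlan_set_proto()
 * above directly.
 */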
int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
{
	if (!eth_type_vlan(htons(val)))
		return -EPROTONOSUPPORT;

	return __br_vlan_set_proto(br, htons(val));
}

int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
{
	switch (val) {
	case 0:
	case 1:
		br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *p;

	/* allow to change the option if there are no port vlans configured */
	list_for_each_entry(p, &br->port_list, list) {
		struct net_bridge_vlan_group *vg = nbp_vlan_group(p);

		if (vg->num_vlans)
			return -EBUSY;
	}

	switch (val) {
	case 0:
	case 1:
		br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	struct net_bridge_vlan *v;

	if (vid != vg->pvid)
		return false;

	v = br_vlan_lookup(&vg->vlan_hash, vid);
	if (v && br_vlan_should_use(v) &&
	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return true;

	return false;
}

static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid)) {
		if (!br_vlan_delete(br, pvid))
			br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
	}

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid) &&
		    !nbp_vlan_delete(p, pvid))
			br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
	}

	br->default_pvid = 0;
}

int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
			       struct netlink_ext_ack *extack)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	unsigned long *changed;
	bool vlchange;
	u16 old_pvid;
	int err = 0;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY,
				  &vlchange, extack);
		if (err)
			goto out;

		if (br_vlan_delete(br, old_pvid))
			br_vlan_notify(br, NULL, old_pvid, 0, RTM_DELVLAN);
		br_vlan_notify(br, NULL, pvid, 0, RTM_NEWVLAN);
		set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &vlchange, extack);
		if (err)
			goto err_port;
		if (nbp_vlan_delete(p, old_pvid))
			br_vlan_notify(br, p, old_pvid, 0, RTM_DELVLAN);
		br_vlan_notify(p->br, p, pvid, 0, RTM_NEWVLAN);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	bitmap_free(changed);
	return err;

err_port:
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid) {
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED,
				     &vlchange, NULL);
			br_vlan_notify(p->br, p, old_pvid, 0, RTM_NEWVLAN);
		}
		nbp_vlan_delete(p, pvid);
		br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
	}

	if (test_bit(0, changed)) {
		if (old_pvid) {
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY,
				    &vlchange, NULL);
			br_vlan_notify(br, NULL, old_pvid, 0, RTM_NEWVLAN);
		}
		br_vlan_delete(br, pvid);
		br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
	}
	goto out;
}

int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
{
	u16 pvid = val;
	int err = 0;

	if (val >= VLAN_VID_MASK)
		return -EINVAL;

	if (pvid == br->default_pvid)
		goto out;

	/* Only allow default pvid change when filtering is disabled */
	if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
		pr_info_once("Please disable vlan filtering to change default_pvid\n");
		err = -EPERM;
		goto out;
	}
	err = __br_vlan_set_default_pvid(br, pvid, NULL);
out:
	return err;
}
int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	rcu_assign_pointer(br->vlgrp, vg);

out:
	return ret;

err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}

int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = switchdev_port_attr_set(p->dev, &attr);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		bool changed;

		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &changed, extack);
		if (ret)
			goto err_vlan_add;
		br_vlan_notify(p->br, p, p->br->default_pvid, 0, RTM_NEWVLAN);
	}
out:
	return ret;

err_vlan_add:
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
err_vlan_enabled:
	kfree(vg);

	goto out;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
		 bool *changed, struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		/* Pass the flags to the hardware bridge */
		ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
		if (ret && ret != -EOPNOTSUPP)
			return ret;
		*changed = __vlan_add_flags(vlan, flags);

		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags, extack);
	if (ret)
		kfree(vlan);
	else
		*changed = true;

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(nbp_vlan_group(port), vid);
	if (!v)
		return -ENOENT;
	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
	br_fdb_delete_by_port(port->br, port, vid, 0);

	return __vlan_del(v);
}

void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(port->br, port, vg);
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}

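/* Fold the per-cpu counters into a single pcpu_sw_netstats. The
 * u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair rereads a
 * CPU's snapshot if a writer raced with the read, so no locks are needed.
 */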
void br_vlan_get_stats(const struct net_bridge_vlan *v,
		       struct pcpu_sw_netstats *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		struct pcpu_sw_netstats *cpu_stats;
		unsigned int start;

		cpu_stats = per_cpu_ptr(v->stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rxpackets = cpu_stats->rx_packets;
			rxbytes = cpu_stats->rx_bytes;
			txbytes = cpu_stats->tx_bytes;
			txpackets = cpu_stats->tx_packets;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rxpackets;
		stats->rx_bytes += rxbytes;
		stats->tx_bytes += txbytes;
		stats->tx_packets += txpackets;
	}
}

int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;

	ASSERT_RTNL();
	p = br_port_get_check_rtnl(dev);
	if (p)
		vg = nbp_vlan_group(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group(netdev_priv(dev));
	else
		return -EINVAL;

	*p_pvid = br_get_pvid(vg);
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid);

int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;

	p = br_port_get_check_rcu(dev);
	if (p)
		vg = nbp_vlan_group_rcu(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group_rcu(netdev_priv(dev));
	else
		return -EINVAL;

	*p_pvid = br_get_pvid(vg);
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);

int br_vlan_get_info(const struct net_device *dev, u16 vid,
		     struct bridge_vlan_info *p_vinfo)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge_port *p;

	ASSERT_RTNL();
	p = br_port_get_check_rtnl(dev);
	if (p)
		vg = nbp_vlan_group(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group(netdev_priv(dev));
	else
		return -EINVAL;

	v = br_vlan_find(vg, vid);
	if (!v)
		return -ENOENT;

	p_vinfo->vid = vid;
	p_vinfo->flags = v->flags;
	if (vid == br_get_pvid(vg))
		p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_info);

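/* vlan bridge binding support: an 8021q upper of the bridge created with
 * e.g. "ip link add link br0 name br0.10 type vlan id 10 bridge_binding on"
 * (illustrative iproute2 usage) tracks carrier based on the ports that
 * actually carry its vid; the helpers below locate such devices and keep
 * their link state in sync.
 */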
static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
		!!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
}

static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
			       __always_unused struct netdev_nested_priv *priv)
{
	return br_vlan_is_bind_vlan_dev(dev);
}

static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
{
	int found;

	rcu_read_lock();
	found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
					      NULL);
	rcu_read_unlock();

	return !!found;
}

struct br_vlan_bind_walk_data {
	u16 vid;
	struct net_device *result;
};

static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
					  struct netdev_nested_priv *priv)
{
	struct br_vlan_bind_walk_data *data = priv->data;
	int found = 0;

	if (br_vlan_is_bind_vlan_dev(dev) &&
	    vlan_dev_priv(dev)->vlan_id == data->vid) {
		data->result = dev;
		found = 1;
	}

	return found;
}

static struct net_device *
br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
{
	struct br_vlan_bind_walk_data data = {
		.vid = vid,
	};
	struct netdev_nested_priv priv = {
		.data = (void *)&data,
	};

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
				      &priv);
	rcu_read_unlock();

	return data.result;
}

static bool br_vlan_is_dev_up(const struct net_device *dev)
{
	return !!(dev->flags & IFF_UP) && netif_oper_up(dev);
}

static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
				       struct net_device *vlan_dev)
{
	u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	bool has_carrier = false;

	if (!netif_carrier_ok(br->dev)) {
		netif_carrier_off(vlan_dev);
		return;
	}

	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
			has_carrier = true;
			break;
		}
	}

	if (has_carrier)
		netif_carrier_on(vlan_dev);
	else
		netif_carrier_off(vlan_dev);
}

static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
{
	struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
	struct net_bridge_vlan *vlan;
	struct net_device *vlan_dev;

	list_for_each_entry(vlan, &vg->vlan_list, vlist) {
		vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
							   vlan->vid);
		if (vlan_dev) {
			if (br_vlan_is_dev_up(p->dev)) {
				if (netif_carrier_ok(p->br->dev))
					netif_carrier_on(vlan_dev);
			} else {
				br_vlan_set_vlan_dev_state(p->br, vlan_dev);
			}
		}
	}
}

static void br_vlan_upper_change(struct net_device *dev,
				 struct net_device *upper_dev,
				 bool linking)
{
	struct net_bridge *br = netdev_priv(dev);

	if (!br_vlan_is_bind_vlan_dev(upper_dev))
		return;

	if (linking) {
		br_vlan_set_vlan_dev_state(br, upper_dev);
		br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
	} else {
		br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
			      br_vlan_has_upper_bind_vlan_dev(dev));
	}
}

struct br_vlan_link_state_walk_data {
	struct net_bridge *br;
};

static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
					struct netdev_nested_priv *priv)
{
	struct br_vlan_link_state_walk_data *data = priv->data;

	if (br_vlan_is_bind_vlan_dev(vlan_dev))
		br_vlan_set_vlan_dev_state(data->br, vlan_dev);

	return 0;
}

static void br_vlan_link_state_change(struct net_device *dev,
				      struct net_bridge *br)
{
	struct br_vlan_link_state_walk_data data = {
		.br = br
	};
	struct netdev_nested_priv priv = {
		.data = (void *)&data,
	};

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
				      &priv);
	rcu_read_unlock();
}

/* Must be protected by RTNL. */
static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
{
	struct net_device *vlan_dev;

	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
		return;

	vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
	if (vlan_dev)
		br_vlan_set_vlan_dev_state(p->br, vlan_dev);
}

/* Must be protected by RTNL. */
int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_bridge *br = netdev_priv(dev);
	int vlcmd = 0, ret = 0;
	bool changed = false;

	switch (event) {
	case NETDEV_REGISTER:
		ret = br_vlan_add(br, br->default_pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
		vlcmd = RTM_NEWVLAN;
		break;
	case NETDEV_UNREGISTER:
		changed = !br_vlan_delete(br, br->default_pvid);
		vlcmd = RTM_DELVLAN;
		break;
	case NETDEV_CHANGEUPPER:
		info = ptr;
		br_vlan_upper_change(dev, info->upper_dev, info->linking);
		break;

	case NETDEV_CHANGE:
	case NETDEV_UP:
		if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
			break;
		br_vlan_link_state_change(dev, br);
		break;
	}
	if (changed)
		br_vlan_notify(br, NULL, br->default_pvid, 0, vlcmd);

	return ret;
}

/* Must be protected by RTNL. */
void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
{
	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
		return;

	switch (event) {
	case NETDEV_CHANGE:
	case NETDEV_DOWN:
	case NETDEV_UP:
		br_vlan_set_all_vlan_dev_state(p);
		break;
	}
}

static bool br_vlan_stats_fill(struct sk_buff *skb,
			       const struct net_bridge_vlan *v)
{
	struct pcpu_sw_netstats stats;
	struct nlattr *nest;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_STATS);
	if (!nest)
		return false;

	br_vlan_get_stats(v, &stats);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES, stats.rx_bytes,
			      BRIDGE_VLANDB_STATS_PAD) ||
	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_PACKETS,
			      stats.rx_packets, BRIDGE_VLANDB_STATS_PAD) ||
	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES, stats.tx_bytes,
			      BRIDGE_VLANDB_STATS_PAD) ||
	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_PACKETS,
			      stats.tx_packets, BRIDGE_VLANDB_STATS_PAD))
		goto out_err;

	nla_nest_end(skb, nest);

	return true;

out_err:
	nla_nest_cancel(skb, nest);
	return false;
}

/* v_opts is used to dump the options which must be equal in the whole range */
static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
			      const struct net_bridge_vlan *v_opts,
			      u16 flags,
			      bool dump_stats)
{
	struct bridge_vlan_info info;
	struct nlattr *nest;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY);
	if (!nest)
		return false;

	memset(&info, 0, sizeof(info));
	info.vid = vid;
	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		info.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	if (flags & BRIDGE_VLAN_INFO_PVID)
		info.flags |= BRIDGE_VLAN_INFO_PVID;

	if (nla_put(skb, BRIDGE_VLANDB_ENTRY_INFO, sizeof(info), &info))
		goto out_err;

	if (vid_range && vid < vid_range &&
	    !(flags & BRIDGE_VLAN_INFO_PVID) &&
	    nla_put_u16(skb, BRIDGE_VLANDB_ENTRY_RANGE, vid_range))
		goto out_err;

	if (v_opts) {
		if (!br_vlan_opts_fill(skb, v_opts))
			goto out_err;

		if (dump_stats && !br_vlan_stats_fill(skb, v_opts))
			goto out_err;
	}

	nla_nest_end(skb, nest);

	return true;

out_err:
	nla_nest_cancel(skb, nest);
	return false;
}

static size_t rtnl_vlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
		+ nla_total_size(0) /* BRIDGE_VLANDB_ENTRY */
		+ nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_ENTRY_RANGE */
		+ nla_total_size(sizeof(struct bridge_vlan_info)) /* BRIDGE_VLANDB_ENTRY_INFO */
		+ br_vlan_opts_nl_size(); /* bridge vlan options */
}

void br_vlan_notify(const struct net_bridge *br,
		    const struct net_bridge_port *p,
		    u16 vid, u16 vid_range,
		    int cmd)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v = NULL;
	struct br_vlan_msg *bvm;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	struct net *net;
	u16 flags = 0;
	int ifindex;

	/* right now notifications are done only with rtnl held */
	ASSERT_RTNL();

	if (p) {
		ifindex = p->dev->ifindex;
		vg = nbp_vlan_group(p);
		net = dev_net(p->dev);
	} else {
		ifindex = br->dev->ifindex;
		vg = br_vlan_group(br);
		net = dev_net(br->dev);
	}

	skb = nlmsg_new(rtnl_vlan_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		goto out_err;

	err = -EMSGSIZE;
	nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*bvm), 0);
	if (!nlh)
		goto out_err;
	bvm = nlmsg_data(nlh);
	memset(bvm, 0, sizeof(*bvm));
	bvm->family = AF_BRIDGE;
	bvm->ifindex = ifindex;

	switch (cmd) {
	case RTM_NEWVLAN:
		/* need to find the vlan due to flags/options */
		v = br_vlan_find(vg, vid);
		if (!v || !br_vlan_should_use(v))
			goto out_kfree;

		flags = v->flags;
		if (br_get_pvid(vg) == v->vid)
			flags |= BRIDGE_VLAN_INFO_PVID;
		break;
	case RTM_DELVLAN:
		break;
	default:
		goto out_kfree;
	}

	if (!br_vlan_fill_vids(skb, vid, vid_range, v, flags, false))
		goto out_err;

	nlmsg_end(skb, nlh);
	rtnl_notify(skb, net, 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
	return;

out_err:
	rtnl_set_sk_err(net, RTNLGRP_BRVLAN, err);
out_kfree:
	kfree_skb(skb);
}

/* check if v_curr can enter a range ending in range_end */
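/* e.g. vids 10, 11 and 12 with identical flags and per-vlan options are
 * coalesced into the single range 10-12 by the dump and notify code.
 */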
bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
			     const struct net_bridge_vlan *range_end)
{
	return v_curr->vid - range_end->vid == 1 &&
	       range_end->flags == v_curr->flags &&
	       br_vlan_opts_eq_range(v_curr, range_end);
}

static int br_vlan_dump_dev(const struct net_device *dev,
			    struct sk_buff *skb,
			    struct netlink_callback *cb,
			    u32 dump_flags)
{
	struct net_bridge_vlan *v, *range_start = NULL, *range_end = NULL;
	bool dump_stats = !!(dump_flags & BRIDGE_VLANDB_DUMPF_STATS);
	struct net_bridge_vlan_group *vg;
	int idx = 0, s_idx = cb->args[1];
	struct nlmsghdr *nlh = NULL;
	struct net_bridge_port *p;
	struct br_vlan_msg *bvm;
	struct net_bridge *br;
	int err = 0;
	u16 pvid;

	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
		return -EINVAL;

	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
		p = NULL;
	} else {
		p = br_port_get_rcu(dev);
		if (WARN_ON(!p))
			return -EINVAL;
		vg = nbp_vlan_group_rcu(p);
		br = p->br;
	}

	if (!vg)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RTM_NEWVLAN, sizeof(*bvm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;
	bvm = nlmsg_data(nlh);
	memset(bvm, 0, sizeof(*bvm));
	bvm->family = PF_BRIDGE;
	bvm->ifindex = dev->ifindex;
	pvid = br_get_pvid(vg);

	/* idx must stay at range's beginning until it is filled in */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;
		if (idx < s_idx) {
			idx++;
			continue;
		}

		if (!range_start) {
			range_start = v;
			range_end = v;
			continue;
		}

		if (dump_stats || v->vid == pvid ||
		    !br_vlan_can_enter_range(v, range_end)) {
			u16 vlan_flags = br_vlan_flags(range_start, pvid);

			if (!br_vlan_fill_vids(skb, range_start->vid,
					       range_end->vid, range_start,
					       vlan_flags, dump_stats)) {
				err = -EMSGSIZE;
				break;
			}
			/* advance number of filled vlans */
			idx += range_end->vid - range_start->vid + 1;

			range_start = v;
		}
		range_end = v;
	}

	/* err will be 0 and range_start will be set in 3 cases here:
	 * - first vlan (range_start == range_end)
	 * - last vlan (range_start == range_end, not in range)
	 * - last vlan range (range_start != range_end, in range)
	 */
	if (!err && range_start &&
	    !br_vlan_fill_vids(skb, range_start->vid, range_end->vid,
			       range_start, br_vlan_flags(range_start, pvid),
			       dump_stats))
		err = -EMSGSIZE;

	cb->args[1] = err ? idx : 0;

	nlmsg_end(skb, nlh);

	return err;
}

static const struct nla_policy br_vlan_db_dump_pol[BRIDGE_VLANDB_DUMP_MAX + 1] = {
	[BRIDGE_VLANDB_DUMP_FLAGS] = { .type = NLA_U32 },
};

static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *dtb[BRIDGE_VLANDB_DUMP_MAX + 1];
	int idx = 0, err = 0, s_idx = cb->args[0];
	struct net *net = sock_net(skb->sk);
	struct br_vlan_msg *bvm;
	struct net_device *dev;
	u32 dump_flags = 0;

	err = nlmsg_parse(cb->nlh, sizeof(*bvm), dtb, BRIDGE_VLANDB_DUMP_MAX,
			  br_vlan_db_dump_pol, cb->extack);
	if (err < 0)
		return err;

	bvm = nlmsg_data(cb->nlh);
	if (dtb[BRIDGE_VLANDB_DUMP_FLAGS])
		dump_flags = nla_get_u32(dtb[BRIDGE_VLANDB_DUMP_FLAGS]);

	rcu_read_lock();
	if (bvm->ifindex) {
		dev = dev_get_by_index_rcu(net, bvm->ifindex);
		if (!dev) {
			err = -ENODEV;
			goto out_err;
		}
		err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
		if (err && err != -EMSGSIZE)
			goto out_err;
	} else {
		for_each_netdev_rcu(net, dev) {
			if (idx < s_idx)
				goto skip;

			err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
				break;
skip:
			idx++;
		}
	}
	cb->args[0] = idx;
	rcu_read_unlock();

	return skb->len;

out_err:
	rcu_read_unlock();

	return err;
}

static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] = {
	[BRIDGE_VLANDB_ENTRY_INFO]	=
		NLA_POLICY_EXACT_LEN(sizeof(struct bridge_vlan_info)),
	[BRIDGE_VLANDB_ENTRY_RANGE]	= { .type = NLA_U16 },
	[BRIDGE_VLANDB_ENTRY_STATE]	= { .type = NLA_U8 },
	[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = { .type = NLA_NESTED },
};

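/* Process one BRIDGE_VLANDB_ENTRY attribute: RTM_NEWVLAN maps onto the
 * legacy RTM_SETLINK vlan handling and RTM_DELVLAN onto RTM_DELLINK; any
 * per-vlan options (e.g. state) are then applied to the resulting range.
 */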
static int br_vlan_rtm_process_one(struct net_device *dev,
				   const struct nlattr *attr,
				   int cmd, struct netlink_ext_ack *extack)
{
1921
	struct bridge_vlan_info *vinfo, vrange_end, *vinfo_last = NULL;
1922
	struct nlattr *tb[BRIDGE_VLANDB_ENTRY_MAX + 1];
1923
	bool changed = false, skip_processing = false;
1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0, cmdmap = 0;
	struct net_bridge *br;

	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (WARN_ON(!p))
			return -ENODEV;
		br = p->br;
		vg = nbp_vlan_group(p);
	}

	if (WARN_ON(!vg))
		return -ENODEV;

	err = nla_parse_nested(tb, BRIDGE_VLANDB_ENTRY_MAX, attr,
			       br_vlan_db_policy, extack);
	if (err)
		return err;

	if (!tb[BRIDGE_VLANDB_ENTRY_INFO]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry info");
		return -EINVAL;
	}
1952
	memset(&vrange_end, 0, sizeof(vrange_end));
1953 1954 1955 1956 1957 1958 1959 1960 1961 1962

	vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
	if (vinfo->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
			    BRIDGE_VLAN_INFO_RANGE_END)) {
		NL_SET_ERR_MSG_MOD(extack, "Old-style vlan ranges are not allowed when using RTM vlan calls");
		return -EINVAL;
	}
	if (!br_vlan_valid_id(vinfo->vid, extack))
		return -EINVAL;

1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977
	if (tb[BRIDGE_VLANDB_ENTRY_RANGE]) {
		vrange_end.vid = nla_get_u16(tb[BRIDGE_VLANDB_ENTRY_RANGE]);
		/* validate user-provided flags without RANGE_BEGIN */
		vrange_end.flags = BRIDGE_VLAN_INFO_RANGE_END | vinfo->flags;
		vinfo->flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;

		/* vinfo_last is the range start, vinfo the range end */
		vinfo_last = vinfo;
		vinfo = &vrange_end;

		if (!br_vlan_valid_id(vinfo->vid, extack) ||
		    !br_vlan_valid_range(vinfo, vinfo_last, extack))
			return -EINVAL;
	}

1978 1979 1980
	switch (cmd) {
	case RTM_NEWVLAN:
		cmdmap = RTM_SETLINK;
1981
		skip_processing = !!(vinfo->flags & BRIDGE_VLAN_INFO_ONLY_OPTS);
1982
		break;
1983 1984 1985
	case RTM_DELVLAN:
		cmdmap = RTM_DELLINK;
		break;
1986 1987
	}

1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017
	if (!skip_processing) {
		struct bridge_vlan_info *tmp_last = vinfo_last;

		/* br_process_vlan_info may overwrite vinfo_last */
		err = br_process_vlan_info(br, p, cmdmap, vinfo, &tmp_last,
					   &changed, extack);

		/* notify first if anything changed */
		if (changed)
			br_ifinfo_notify(cmdmap, br, p);

		if (err)
			return err;
	}

	/* deal with options */
	if (cmd == RTM_NEWVLAN) {
		struct net_bridge_vlan *range_start, *range_end;

		if (vinfo_last) {
			range_start = br_vlan_find(vg, vinfo_last->vid);
			range_end = br_vlan_find(vg, vinfo->vid);
		} else {
			range_start = br_vlan_find(vg, vinfo->vid);
			range_end = range_start;
		}

		err = br_vlan_process_options(br, p, range_start, range_end,
					      tb, extack);
	}
2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065

	return err;
}

static int br_vlan_rtm_process(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct br_vlan_msg *bvm;
	struct net_device *dev;
	struct nlattr *attr;
	int err, vlans = 0;
	int rem;

	/* this should validate the header and check for remaining bytes */
	err = nlmsg_parse(nlh, sizeof(*bvm), NULL, BRIDGE_VLANDB_MAX, NULL,
			  extack);
	if (err < 0)
		return err;

	bvm = nlmsg_data(nlh);
	dev = __dev_get_by_index(net, bvm->ifindex);
	if (!dev)
		return -ENODEV;

	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "The device is not a valid bridge or bridge port");
		return -EINVAL;
	}

	nlmsg_for_each_attr(attr, nlh, sizeof(*bvm), rem) {
		if (nla_type(attr) != BRIDGE_VLANDB_ENTRY)
			continue;

		vlans++;
		err = br_vlan_rtm_process_one(dev, attr, nlh->nlmsg_type,
					      extack);
		if (err)
			break;
	}
	if (!vlans) {
		NL_SET_ERR_MSG_MOD(extack, "No vlans found to process");
		err = -EINVAL;
	}

	return err;
}

2066 2067 2068 2069
void br_vlan_rtnl_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL,
			     br_vlan_rtm_dump, 0);
2070 2071
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN,
			     br_vlan_rtm_process, NULL, 0);
2072 2073
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELVLAN,
			     br_vlan_rtm_process, NULL, 0);
2074 2075 2076 2077 2078
}

void br_vlan_rtnl_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETVLAN);
2079
	rtnl_unregister(PF_BRIDGE, RTM_NEWVLAN);
2080
	rtnl_unregister(PF_BRIDGE, RTM_DELVLAN);
2081
}