// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>

#include "br_private.h"
#include "br_private_tunnel.h"

static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);

static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
			      const void *ptr)
{
	const struct net_bridge_vlan *vle = ptr;
	u16 vid = *(u16 *)arg->key;

	return vle->vid != vid;
}

static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.max_size = VLAN_N_VID,
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};
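
/* A note on the parameters above: VLAN IDs are only 12 bits wide, so the
 * table is capped at VLAN_N_VID (4096) entries, while the small nelem_hint
 * keeps the initial allocation tiny for the common few-VLANs case.
 * br_vlan_cmp() makes lookups match purely on the 16-bit vid key.
 */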

static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}

static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg,
			    const struct net_bridge_vlan *v)
{
	if (vg->pvid == v->vid)
		return false;

	smp_wmb();
	br_vlan_set_pvid_state(vg, v->state);
	vg->pvid = v->vid;

	return true;
}

static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return false;

	smp_wmb();
	vg->pvid = 0;

	return true;
}
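
/* The smp_wmb() in the two pvid helpers above is assumed to pair with the
 * smp_rmb() in the br_get_pvid() reader (br_private.h), so lockless readers
 * observe the new pvid only after the associated state has been published.
 */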

/* return true if anything changed, false otherwise */
static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan_group *vg;
	u16 old_flags = v->flags;
	bool ret;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	if (flags & BRIDGE_VLAN_INFO_PVID)
		ret = __vlan_add_pvid(vg, v);
	else
		ret = __vlan_delete_pvid(vg, v->vid);

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	else
		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;

	return ret || !!(old_flags ^ v->flags);
}

static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  struct net_bridge_vlan *v, u16 flags,
			  struct netlink_ext_ack *extack)
{
	int err;

	/* Try switchdev op first. In case it is not supported, fall back to
	 * 8021q add.
	 */
	err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
	if (err == -EOPNOTSUPP)
		return vlan_vid_add(dev, br->vlan_proto, v->vid);
	v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
	return err;
}
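
/* Error convention used above (and mirrored in __vlan_vid_del() below):
 * -EOPNOTSUPP from the switchdev op means "no hardware offload", in which
 * case the software 8021q filter is used instead and the error is
 * swallowed; any other error is propagated to the caller.
 */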

static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan_group *vg;
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	headp = &vg->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid >= vent->vid)
			break;
	}
	list_add_rcu(&v->vlist, hpos);
}

static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}

static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  const struct net_bridge_vlan *v)
{
	int err;

	/* Try switchdev op first. In case it is not supported, fall back to
	 * 8021q del.
	 */
	err = br_switchdev_port_vlan_del(dev, v->vid);
	if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
		vlan_vid_del(dev, br->vlan_proto, v->vid);
	return err == -EOPNOTSUPP ? 0 : err;
}

/* Return the master vlan; if it doesn't exist, it gets created. In all cases
 * a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *
br_vlan_get_master(struct net_bridge *br, u16 vid,
		   struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		bool changed;

		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0, &changed, extack))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
		refcount_set(&masterv->refcnt, 1);
		return masterv;
	}
	refcount_inc(&masterv->refcnt);

	return masterv;
}
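
/* Rough lifetime rule (inferred from the callers in this file): the master
 * vlan's refcnt counts the bridge brentry itself plus every port vlan that
 * points at it via v->brvlan; br_vlan_put_master() below drops one
 * reference and frees the entry via RCU once the count reaches zero.
 */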

static void br_master_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(!br_vlan_is_master(v));
	free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}

static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (refcount_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		br_multicast_toggle_one_vlan(masterv, false);
		br_multicast_ctx_deinit(&masterv->br_mcast_ctx);
		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
	}
}

static void nbp_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(br_vlan_is_master(v));
	/* if we had per-port stats configured then free them here */
	if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
		free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}

/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
 * 2. vlan is being added on a bridge (both master and brentry flags)
 * 3. vlan is being added on a port, but a global entry didn't exist which
 *    is being created right now (master flag set, brentry flag unset), the
 *    global entry is used for global per-vlan features, but not for filtering
 * 4. same as 3 but with both master and brentry flags set so the entry
 *    will be used for filtering in both the port and the bridge
 */
static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
		      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *masterv = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_device *dev;
	struct net_bridge *br;
	int err;

	if (br_vlan_is_master(v)) {
		br = v->br;
		dev = br->dev;
		vg = br_vlan_group(br);
	} else {
		p = v->port;
		br = p->br;
		dev = p->dev;
		vg = nbp_vlan_group(p);
	}

	if (p) {
		/* Add VLAN to the device filter if it is supported.
		 * This ensures tagged traffic enters the bridge when
		 * promiscuous mode is disabled by br_manage_promisc().
		 */
		err = __vlan_vid_add(dev, br, v, flags, extack);
		if (err)
			goto out;

		/* need to work on the master vlan too */
		if (flags & BRIDGE_VLAN_INFO_MASTER) {
			bool changed;

			err = br_vlan_add(br, v->vid,
					  flags | BRIDGE_VLAN_INFO_BRENTRY,
					  &changed, extack);
			if (err)
				goto out_filt;

			if (changed)
				br_vlan_notify(br, NULL, v->vid, 0,
					       RTM_NEWVLAN);
		}

		masterv = br_vlan_get_master(br, v->vid, extack);
		if (!masterv) {
			err = -ENOMEM;
			goto out_filt;
		}
		v->brvlan = masterv;
		if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
			v->stats =
			     netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
			if (!v->stats) {
				err = -ENOMEM;
				goto out_filt;
			}
			v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
		} else {
			v->stats = masterv->stats;
		}
		br_multicast_port_ctx_init(p, v, &v->port_mcast_ctx);
	} else {
		err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
		if (err && err != -EOPNOTSUPP)
			goto out;
		br_multicast_ctx_init(br, v, &v->br_mcast_ctx);
		v->priv_flags |= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	}

	/* Add the dev mac and count the vlan only if it's usable */
	if (br_vlan_should_use(v)) {
		err = br_fdb_add_local(br, p, dev->dev_addr, v->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto out_filt;
		}
		vg->num_vlans++;
	}

	/* set the state before publishing */
	v->state = BR_STATE_FORWARDING;

	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
					    br_vlan_rht_params);
	if (err)
		goto out_fdb_insert;

	__vlan_add_list(v);
	__vlan_add_flags(v, flags);
	br_multicast_toggle_one_vlan(v, true);

	if (p)
		nbp_vlan_set_vlan_dev_state(p, v->vid);
out:
	return err;

out_fdb_insert:
	if (br_vlan_should_use(v)) {
		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
		vg->num_vlans--;
	}

out_filt:
	if (p) {
		__vlan_vid_del(dev, br, v);
		if (masterv) {
			if (v->stats && masterv->stats != v->stats)
				free_percpu(v->stats);
			v->stats = NULL;

			br_vlan_put_master(masterv);
			v->brvlan = NULL;
		}
	} else {
		br_switchdev_port_vlan_del(dev, v->vid);
	}

	goto out;
}
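
/* Hypothetical call chains into __vlan_add(), for orientation (vid 10 is
 * just an example value):
 *
 *	br_vlan_add(br, 10, BRIDGE_VLAN_INFO_BRENTRY, &changed, extack);
 *		-> __vlan_add(master vlan, ...)		// case 2 above
 *	nbp_vlan_add(p, 10, BRIDGE_VLAN_INFO_UNTAGGED, &changed, extack);
 *		-> __vlan_add(port vlan, ...)		// case 1 or 3/4 above
 */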

static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v);
		if (err)
			goto out;
	} else {
		err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
		if (err && err != -EOPNOTSUPP)
			goto out;
		err = 0;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	if (masterv != v) {
		vlan_tunnel_info_del(vg, v);
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		nbp_vlan_set_vlan_dev_state(p, v->vid);
		br_multicast_toggle_one_vlan(v, false);
		br_multicast_port_ctx_deinit(&v->port_mcast_ctx);
		call_rcu(&v->rcu, nbp_vlan_rcu_free);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}
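
/* Teardown above mirrors __vlan_add(): the pvid and the device filter
 * entry go first, then the port vlan is unlinked and freed via RCU, and
 * the master vlan reference taken in __vlan_add() is dropped last.
 */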

static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	vlan_tunnel_deinit(vg);
	kfree(vg);
}

static void __vlan_flush(const struct net_bridge *br,
			 const struct net_bridge_port *p,
			 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;
	u16 v_start = 0, v_end = 0;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) {
		/* take care of disjoint ranges */
		if (!v_start) {
			v_start = vlan->vid;
		} else if (vlan->vid - v_end != 1) {
			/* found range end, notify and start next one */
			br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
			v_start = vlan->vid;
		}
		v_end = vlan->vid;

		__vlan_del(vlan);
	}

	/* notify about the last/whole vlan range */
	if (v_start)
		br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
}

struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       const struct net_bridge_port *p,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats;
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id.  If the vlan id has untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* Vlan entry must be configured at this point.  The
	 * only exception is when the bridge is set in promisc mode and the
	 * packet is destined for the bridge device.  In this case
	 * pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	/* If the skb will be sent using forwarding offload, the assumption is
	 * that the switchdev will inject the packet into hardware together
	 * with the bridge VLAN, so that it can be forwarded according to that
	 * VLAN. The switchdev should deal with popping the VLAN header in
	 * hardware on each egress port as appropriate. So only strip the VLAN
	 * header if forwarding offload is not being used.
	 */
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED &&
	    !br_switchdev_frame_uses_tx_fwd_offload(skb))
		__vlan_hwaccel_clear_tag(skb);

	if (p && (p->flags & BR_VLAN_TUNNEL) &&
	    br_handle_egress_vlan_tunnel(skb, v)) {
		kfree_skb(skb);
		return NULL;
	}
out:
	return skb;
}
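
/* In short: on egress a frame leaves untagged only if its vlan carries the
 * UNTAGGED flag and the skb is not using tx forwarding offload; otherwise
 * the tag set up at ingress stays on the skb (or is rewritten by the
 * egress vlan tunnel code just above).
 */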

/* Called under RCU */
static bool __allowed_ingress(const struct net_bridge *br,
			      struct net_bridge_vlan_group *vg,
			      struct sk_buff *skb, u16 *vid,
			      u8 *state,
			      struct net_bridge_vlan **vlan)
{
	struct pcpu_sw_netstats *stats;
	struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == br->vlan_proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != br->vlan_proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port.  That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port.  Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, we know that skb->vlan_tci VID
			 * field was 0.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

		/* if snooping and stats are disabled we can avoid the lookup */
		if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) &&
		    !br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
			if (*state == BR_STATE_FORWARDING) {
				*state = br_vlan_get_pvid_state(vg);
				return br_vlan_state_allowed(*state, true);
			} else {
				return true;
			}
		}
	}
	v = br_vlan_find(vg, *vid);
	if (!v || !br_vlan_should_use(v))
		goto drop;

	if (*state == BR_STATE_FORWARDING) {
		*state = br_vlan_get_state(v);
		if (!br_vlan_state_allowed(*state, true))
			goto drop;
	}

	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->rx_bytes += skb->len;
		stats->rx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	*vlan = v;

	return true;

drop:
	kfree_skb(skb);
	return false;
}

bool br_allowed_ingress(const struct net_bridge *br,
			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
			u16 *vid, u8 *state,
			struct net_bridge_vlan **vlan)
{
	/* If VLAN filtering is disabled on the bridge, all packets are
	 * permitted.
	 */
	*vlan = NULL;
	if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
		return true;
	}

	return __allowed_ingress(br, vg, skb, vid, state, vlan);
}
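
/* A rough sketch of the ingress decision implemented above:
 *
 *	filtering disabled	-> accept, skb marked as unfiltered
 *	untagged/prio-tagged	-> classified into the pvid, or dropped if
 *				   no pvid is configured on the port/bridge
 *	tagged			-> accepted only if the vid exists in vg and
 *				   the per-vlan state allows ingress
 */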

/* Called under RCU. */
bool br_allowed_egress(struct net_bridge_vlan_group *vg,
		       const struct sk_buff *skb)
{
	const struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		return true;

	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	if (v && br_vlan_should_use(v) &&
	    br_vlan_state_allowed(br_vlan_get_state(v), false))
		return true;

	return false;
}

/* Called under RCU */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge *br = p->br;
	struct net_bridge_vlan *v;

	/* If filtering was disabled at input, let it pass. */
	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
		return true;

	vg = nbp_vlan_group_rcu(p);
	if (!vg || !vg->num_vlans)
		return false;

	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
		*vid = 0;

	if (!*vid) {
		*vid = br_get_pvid(vg);
		if (!*vid ||
		    !br_vlan_state_allowed(br_vlan_get_pvid_state(vg), true))
			return false;

		return true;
	}

	v = br_vlan_find(vg, *vid);
	if (v && br_vlan_state_allowed(br_vlan_get_state(v), true))
		return true;

	return false;
}

static int br_vlan_add_existing(struct net_bridge *br,
				struct net_bridge_vlan_group *vg,
				struct net_bridge_vlan *vlan,
				u16 flags, bool *changed,
				struct netlink_ext_ack *extack)
{
	int err;

	err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	if (!br_vlan_is_brentry(vlan)) {
		/* Trying to change flags of non-existent bridge vlan */
		if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
			err = -EINVAL;
			goto err_flags;
		}
		/* It was only kept for port vlans, now make it real */
		err = br_fdb_add_local(br, NULL, br->dev->dev_addr, vlan->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto err_fdb_insert;
		}

		refcount_inc(&vlan->refcnt);
		vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans++;
		*changed = true;
		br_multicast_toggle_one_vlan(vlan, true);
	}

	if (__vlan_add_flags(vlan, flags))
		*changed = true;

	return 0;

err_fdb_insert:
err_flags:
	br_switchdev_port_vlan_del(br->dev, vlan->vid);
	return err;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
		struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan)
		return br_vlan_add_existing(br, vg, vlan, flags, changed,
					    extack);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!vlan->stats) {
		kfree(vlan);
		return -ENOMEM;
	}
	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		refcount_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags, extack);
	if (ret) {
		free_percpu(vlan->stats);
		kfree(vlan);
	} else {
		*changed = true;
	}

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
	br_fdb_delete_by_port(br, NULL, vid, 0);

	vlan_tunnel_info_del(vg, v);

	return __vlan_del(v);
}

void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(br, NULL, vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}

struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (!vg)
		return NULL;

	return br_vlan_lookup(&vg->vlan_hash, vid);
}

/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
	if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
		return;

	spin_lock_bh(&br->lock);
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q)) {
		/* Bridge Group Address */
		br->group_addr[5] = 0x00;
	} else { /* vlan_enabled && ETH_P_8021AD */
		/* Provider Bridge Group Address */
		br->group_addr[5] = 0x08;
	}
	spin_unlock_bh(&br->lock);
}

/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q))
		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
	else /* vlan_enabled && ETH_P_8021AD */
		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
					      ~(1u << br->group_addr[5]);
}

int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val,
			  struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
		return 0;

	br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);

	err = switchdev_port_attr_set(br->dev, &attr, extack);
	if (err && err != -EOPNOTSUPP) {
		br_opt_toggle(br, BROPT_VLAN_ENABLED, !val);
		return err;
	}

	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);
	if (!val && br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		br_info(br, "vlan filtering disabled, automatically disabling multicast vlan snooping\n");
		br_multicast_toggle_vlan_snooping(br, false, NULL);
	}

	return 0;
}
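
/* Example trigger (assumed from the standard bridge interfaces): running
 * `ip link set br0 type bridge vlan_filtering 1` reaches this function via
 * the bridge netlink handlers with val == 1; the switchdev attr offers the
 * toggle to hardware first, and -EOPNOTSUPP simply means software-only.
 */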

bool br_vlan_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_opt_get(br, BROPT_VLAN_ENABLED);
}
EXPORT_SYMBOL_GPL(br_vlan_enabled);

int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
{
	struct net_bridge *br = netdev_priv(dev);

	*p_proto = ntohs(br->vlan_proto);

	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_proto);

int __br_vlan_set_proto(struct net_bridge *br, __be16 proto,
			struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_protocol = ntohs(proto),
	};
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto = br->vlan_proto;

	if (br->vlan_proto == proto)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
	}

	return 0;

err_filt:
	attr.u.vlan_protocol = ntohs(oldproto);
	switchdev_port_attr_set(br->dev, &attr, NULL);

	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
	}

	return err;
}
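
/* Note the two-phase update above: vids for the new proto are added on all
 * ports before the protocol is switched, and the old-proto vids are removed
 * only afterwards, so a mid-way failure unwinds via err_filt while the
 * old-proto filter entries stay intact.
 */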

int br_vlan_set_proto(struct net_bridge *br, unsigned long val,
		      struct netlink_ext_ack *extack)
{
	if (!eth_type_vlan(htons(val)))
		return -EPROTONOSUPPORT;

	return __br_vlan_set_proto(br, htons(val), extack);
}

int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
{
	switch (val) {
	case 0:
	case 1:
		br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *p;

	/* allow changing the option only if there are no port vlans configured */
	list_for_each_entry(p, &br->port_list, list) {
		struct net_bridge_vlan_group *vg = nbp_vlan_group(p);

		if (vg->num_vlans)
			return -EBUSY;
	}

	switch (val) {
	case 0:
	case 1:
		br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	struct net_bridge_vlan *v;

	if (vid != vg->pvid)
		return false;

	v = br_vlan_lookup(&vg->vlan_hash, vid);
	if (v && br_vlan_should_use(v) &&
	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return true;

	return false;
}

static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid)) {
		if (!br_vlan_delete(br, pvid))
			br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
	}

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid) &&
		    !nbp_vlan_delete(p, pvid))
			br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
	}

	br->default_pvid = 0;
}

int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
			       struct netlink_ext_ack *extack)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	unsigned long *changed;
	bool vlchange;
	u16 old_pvid;
	int err = 0;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY,
				  &vlchange, extack);
		if (err)
			goto out;

		if (br_vlan_delete(br, old_pvid))
			br_vlan_notify(br, NULL, old_pvid, 0, RTM_DELVLAN);
		br_vlan_notify(br, NULL, pvid, 0, RTM_NEWVLAN);
		__set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &vlchange, extack);
		if (err)
			goto err_port;
		if (nbp_vlan_delete(p, old_pvid))
			br_vlan_notify(br, p, old_pvid, 0, RTM_DELVLAN);
		br_vlan_notify(p->br, p, pvid, 0, RTM_NEWVLAN);
		__set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	bitmap_free(changed);
	return err;

err_port:
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid) {
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED,
				     &vlchange, NULL);
			br_vlan_notify(p->br, p, old_pvid, 0, RTM_NEWVLAN);
		}
		nbp_vlan_delete(p, pvid);
		br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
	}

	if (test_bit(0, changed)) {
		if (old_pvid) {
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY,
				    &vlchange, NULL);
			br_vlan_notify(br, NULL, old_pvid, 0, RTM_NEWVLAN);
		}
		br_vlan_delete(br, pvid);
		br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
	}
	goto out;
}
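
/* The on-stack `changed` bitmap above records which ports (bit 0 standing
 * in for the bridge itself) actually had the new pvid installed, so the
 * err_port path restores exactly the entries this call created, no others.
 */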

int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val,
			     struct netlink_ext_ack *extack)
{
	u16 pvid = val;
	int err = 0;

	if (val >= VLAN_VID_MASK)
		return -EINVAL;

	if (pvid == br->default_pvid)
		goto out;

	/* Only allow default pvid change when filtering is disabled */
	if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
		pr_info_once("Please disable vlan filtering to change default_pvid\n");
		err = -EPERM;
		goto out;
	}
	err = __br_vlan_set_default_pvid(br, pvid, extack);
out:
	return err;
}

int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	rcu_assign_pointer(br->vlgrp, vg);

out:
	return ret;

err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}
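
/* br_vlan_init() only sets up the group and default_pvid; the actual
 * default pvid 1 entry is added later, on NETDEV_REGISTER, in
 * br_vlan_bridge_event() below.
 */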

int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = switchdev_port_attr_set(p->dev, &attr, extack);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		bool changed;

		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &changed, extack);
		if (ret)
			goto err_vlan_add;
		br_vlan_notify(p->br, p, p->br->default_pvid, 0, RTM_NEWVLAN);
	}
out:
	return ret;

err_vlan_add:
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
err_vlan_enabled:
	kfree(vg);

	goto out;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
		 bool *changed, struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		/* Pass the flags to the hardware bridge */
		ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
		if (ret && ret != -EOPNOTSUPP)
			return ret;
		*changed = __vlan_add_flags(vlan, flags);

		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags, extack);
	if (ret)
		kfree(vlan);
	else
		*changed = true;

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(nbp_vlan_group(port), vid);
	if (!v)
		return -ENOENT;
	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
	br_fdb_delete_by_port(port->br, port, vid, 0);

	return __vlan_del(v);
}

void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(port->br, port, vg);
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}

void br_vlan_get_stats(const struct net_bridge_vlan *v,
		       struct pcpu_sw_netstats *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		struct pcpu_sw_netstats *cpu_stats;
		unsigned int start;

		cpu_stats = per_cpu_ptr(v->stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rxpackets = cpu_stats->rx_packets;
			rxbytes = cpu_stats->rx_bytes;
			txbytes = cpu_stats->tx_bytes;
			txpackets = cpu_stats->tx_packets;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rxpackets;
		stats->rx_bytes += rxbytes;
		stats->tx_bytes += txbytes;
		stats->tx_packets += txpackets;
	}
}
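
/* The loop above folds the per-cpu counters into one pcpu_sw_netstats;
 * the u64_stats_fetch_begin_irq()/retry pair rereads a cpu's counters if a
 * writer raced with the read, keeping the sums tear-free on 32-bit hosts.
 */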

int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;

	ASSERT_RTNL();
	p = br_port_get_check_rtnl(dev);
	if (p)
		vg = nbp_vlan_group(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group(netdev_priv(dev));
	else
		return -EINVAL;

	*p_pvid = br_get_pvid(vg);
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid);

int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;

	p = br_port_get_check_rcu(dev);
	if (p)
		vg = nbp_vlan_group_rcu(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group_rcu(netdev_priv(dev));
	else
		return -EINVAL;

	*p_pvid = br_get_pvid(vg);
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);

void br_vlan_fill_forward_path_pvid(struct net_bridge *br,
				    struct net_device_path_ctx *ctx,
				    struct net_device_path *path)
{
	struct net_bridge_vlan_group *vg;
	int idx = ctx->num_vlans - 1;
	u16 vid;

	path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;

	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
		return;

	vg = br_vlan_group(br);

	if (idx >= 0 &&
	    ctx->vlan[idx].proto == br->vlan_proto) {
		vid = ctx->vlan[idx].id;
	} else {
		path->bridge.vlan_mode = DEV_PATH_BR_VLAN_TAG;
		vid = br_get_pvid(vg);
	}

	path->bridge.vlan_id = vid;
	path->bridge.vlan_proto = br->vlan_proto;
}

int br_vlan_fill_forward_path_mode(struct net_bridge *br,
				   struct net_bridge_port *dst,
				   struct net_device_path *path)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
		return 0;

	vg = nbp_vlan_group_rcu(dst);
	v = br_vlan_find(vg, path->bridge.vlan_id);
	if (!v || !br_vlan_should_use(v))
		return -EINVAL;

	if (!(v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return 0;

	if (path->bridge.vlan_mode == DEV_PATH_BR_VLAN_TAG)
		path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;
	else if (v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
		path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG_HW;
	else
		path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG;

	return 0;
}

int br_vlan_get_info(const struct net_device *dev, u16 vid,
		     struct bridge_vlan_info *p_vinfo)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge_port *p;

	ASSERT_RTNL();
	p = br_port_get_check_rtnl(dev);
	if (p)
		vg = nbp_vlan_group(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group(netdev_priv(dev));
	else
		return -EINVAL;

	v = br_vlan_find(vg, vid);
	if (!v)
		return -ENOENT;

	p_vinfo->vid = vid;
	p_vinfo->flags = v->flags;
	if (vid == br_get_pvid(vg))
		p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_info);

int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
			 struct bridge_vlan_info *p_vinfo)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge_port *p;

	p = br_port_get_check_rcu(dev);
	if (p)
		vg = nbp_vlan_group_rcu(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group_rcu(netdev_priv(dev));
	else
		return -EINVAL;

	v = br_vlan_find(vg, vid);
	if (!v)
		return -ENOENT;

	p_vinfo->vid = vid;
	p_vinfo->flags = v->flags;
	if (vid == br_get_pvid(vg))
		p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_info_rcu);

static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
		!!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
}

static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
			       __always_unused struct netdev_nested_priv *priv)
{
	return br_vlan_is_bind_vlan_dev(dev);
}

static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
{
	int found;

	rcu_read_lock();
	found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
					      NULL);
	rcu_read_unlock();

	return !!found;
}

struct br_vlan_bind_walk_data {
	u16 vid;
	struct net_device *result;
};

static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
					  struct netdev_nested_priv *priv)
{
	struct br_vlan_bind_walk_data *data = priv->data;
	int found = 0;

	if (br_vlan_is_bind_vlan_dev(dev) &&
	    vlan_dev_priv(dev)->vlan_id == data->vid) {
		data->result = dev;
		found = 1;
	}

	return found;
}

static struct net_device *
br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
{
	struct br_vlan_bind_walk_data data = {
		.vid = vid,
	};
	struct netdev_nested_priv priv = {
		.data = (void *)&data,
	};

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
				      &priv);
	rcu_read_unlock();

	return data.result;
}

static bool br_vlan_is_dev_up(const struct net_device *dev)
{
	return !!(dev->flags & IFF_UP) && netif_oper_up(dev);
}

static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
				       struct net_device *vlan_dev)
{
	u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	bool has_carrier = false;

	if (!netif_carrier_ok(br->dev)) {
		netif_carrier_off(vlan_dev);
		return;
	}

	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
			has_carrier = true;
			break;
		}
	}

	if (has_carrier)
		netif_carrier_on(vlan_dev);
	else
		netif_carrier_off(vlan_dev);
}

static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
{
	struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
	struct net_bridge_vlan *vlan;
	struct net_device *vlan_dev;

	list_for_each_entry(vlan, &vg->vlan_list, vlist) {
		vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
							   vlan->vid);
		if (vlan_dev) {
			if (br_vlan_is_dev_up(p->dev)) {
				if (netif_carrier_ok(p->br->dev))
					netif_carrier_on(vlan_dev);
			} else {
				br_vlan_set_vlan_dev_state(p->br, vlan_dev);
			}
		}
	}
}

static void br_vlan_upper_change(struct net_device *dev,
				 struct net_device *upper_dev,
				 bool linking)
{
	struct net_bridge *br = netdev_priv(dev);

	if (!br_vlan_is_bind_vlan_dev(upper_dev))
		return;

	if (linking) {
		br_vlan_set_vlan_dev_state(br, upper_dev);
		br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
	} else {
		br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
			      br_vlan_has_upper_bind_vlan_dev(dev));
	}
}

struct br_vlan_link_state_walk_data {
	struct net_bridge *br;
};

static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
					struct netdev_nested_priv *priv)
{
	struct br_vlan_link_state_walk_data *data = priv->data;

	if (br_vlan_is_bind_vlan_dev(vlan_dev))
		br_vlan_set_vlan_dev_state(data->br, vlan_dev);

	return 0;
}

static void br_vlan_link_state_change(struct net_device *dev,
				      struct net_bridge *br)
{
	struct br_vlan_link_state_walk_data data = {
		.br = br
	};
	struct netdev_nested_priv priv = {
		.data = (void *)&data,
	};

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
				      &priv);
	rcu_read_unlock();
}

/* Must be protected by RTNL. */
static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
{
	struct net_device *vlan_dev;

	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
		return;

	vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
	if (vlan_dev)
		br_vlan_set_vlan_dev_state(p->br, vlan_dev);
}

/* Must be protected by RTNL. */
int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_bridge *br = netdev_priv(dev);
	int vlcmd = 0, ret = 0;
	bool changed = false;

	switch (event) {
	case NETDEV_REGISTER:
		ret = br_vlan_add(br, br->default_pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
		vlcmd = RTM_NEWVLAN;
		break;
	case NETDEV_UNREGISTER:
		changed = !br_vlan_delete(br, br->default_pvid);
		vlcmd = RTM_DELVLAN;
		break;
	case NETDEV_CHANGEUPPER:
		info = ptr;
		br_vlan_upper_change(dev, info->upper_dev, info->linking);
		break;

	case NETDEV_CHANGE:
	case NETDEV_UP:
		if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
			break;
		br_vlan_link_state_change(dev, br);
		break;
	}
	if (changed)
		br_vlan_notify(br, NULL, br->default_pvid, 0, vlcmd);

	return ret;
}

/* Must be protected by RTNL. */
void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
{
	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
		return;

	switch (event) {
	case NETDEV_CHANGE:
	case NETDEV_DOWN:
	case NETDEV_UP:
		br_vlan_set_all_vlan_dev_state(p);
		break;
	}
}

static bool br_vlan_stats_fill(struct sk_buff *skb,
			       const struct net_bridge_vlan *v)
{
	struct pcpu_sw_netstats stats;
	struct nlattr *nest;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_STATS);
	if (!nest)
		return false;

	br_vlan_get_stats(v, &stats);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES, stats.rx_bytes,
			      BRIDGE_VLANDB_STATS_PAD) ||
	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_PACKETS,
			      stats.rx_packets, BRIDGE_VLANDB_STATS_PAD) ||
	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES, stats.tx_bytes,
			      BRIDGE_VLANDB_STATS_PAD) ||
	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_PACKETS,
			      stats.tx_packets, BRIDGE_VLANDB_STATS_PAD))
		goto out_err;

	nla_nest_end(skb, nest);

	return true;

out_err:
	nla_nest_cancel(skb, nest);
	return false;
}

/* v_opts is used to dump the options which must be equal in the whole range */
static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
			      const struct net_bridge_vlan *v_opts,
			      u16 flags,
			      bool dump_stats)
{
	struct bridge_vlan_info info;
	struct nlattr *nest;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY);
	if (!nest)
		return false;

	memset(&info, 0, sizeof(info));
	info.vid = vid;
	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		info.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	if (flags & BRIDGE_VLAN_INFO_PVID)
		info.flags |= BRIDGE_VLAN_INFO_PVID;

	if (nla_put(skb, BRIDGE_VLANDB_ENTRY_INFO, sizeof(info), &info))
		goto out_err;

	if (vid_range && vid < vid_range &&
	    !(flags & BRIDGE_VLAN_INFO_PVID) &&
	    nla_put_u16(skb, BRIDGE_VLANDB_ENTRY_RANGE, vid_range))
		goto out_err;

	if (v_opts) {
		if (!br_vlan_opts_fill(skb, v_opts))
			goto out_err;

		if (dump_stats && !br_vlan_stats_fill(skb, v_opts))
			goto out_err;
	}

	nla_nest_end(skb, nest);

	return true;

out_err:
	nla_nest_cancel(skb, nest);
	return false;
}

static size_t rtnl_vlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
		+ nla_total_size(0) /* BRIDGE_VLANDB_ENTRY */
		+ nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_ENTRY_RANGE */
		+ nla_total_size(sizeof(struct bridge_vlan_info)) /* BRIDGE_VLANDB_ENTRY_INFO */
		+ br_vlan_opts_nl_size(); /* bridge vlan options */
}

void br_vlan_notify(const struct net_bridge *br,
		    const struct net_bridge_port *p,
		    u16 vid, u16 vid_range,
		    int cmd)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v = NULL;
	struct br_vlan_msg *bvm;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	struct net *net;
	u16 flags = 0;
	int ifindex;

	/* right now notifications are done only with rtnl held */
	ASSERT_RTNL();

	if (p) {
		ifindex = p->dev->ifindex;
		vg = nbp_vlan_group(p);
		net = dev_net(p->dev);
	} else {
		ifindex = br->dev->ifindex;
		vg = br_vlan_group(br);
		net = dev_net(br->dev);
	}

	skb = nlmsg_new(rtnl_vlan_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		goto out_err;

	err = -EMSGSIZE;
	nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*bvm), 0);
	if (!nlh)
		goto out_err;
	bvm = nlmsg_data(nlh);
	memset(bvm, 0, sizeof(*bvm));
	bvm->family = AF_BRIDGE;
	bvm->ifindex = ifindex;

	switch (cmd) {
	case RTM_NEWVLAN:
		/* need to find the vlan due to flags/options */
		v = br_vlan_find(vg, vid);
		if (!v || !br_vlan_should_use(v))
			goto out_kfree;

		flags = v->flags;
		if (br_get_pvid(vg) == v->vid)
			flags |= BRIDGE_VLAN_INFO_PVID;
		break;
	case RTM_DELVLAN:
		break;
	default:
		goto out_kfree;
	}

	if (!br_vlan_fill_vids(skb, vid, vid_range, v, flags, false))
		goto out_err;

	nlmsg_end(skb, nlh);
	rtnl_notify(skb, net, 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
	return;

out_err:
	rtnl_set_sk_err(net, RTNLGRP_BRVLAN, err);
out_kfree:
	kfree_skb(skb);
}
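
/* The vid/vid_range pair lets one notification describe a whole compressed
 * span of vlans; see br_vlan_fill_vids(), which only emits
 * BRIDGE_VLANDB_ENTRY_RANGE when the span is wider than a single vid.
 */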

/* check if v_curr can enter a range ending in range_end */
bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
			     const struct net_bridge_vlan *range_end)
{
	return v_curr->vid - range_end->vid == 1 &&
	       range_end->flags == v_curr->flags &&
	       br_vlan_opts_eq_range(v_curr, range_end);
}
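
/* Example: vids 10-14 with identical flags and per-vlan options collapse
 * into a single netlink entry, because each consecutive vid can "enter"
 * the range ending at its predecessor (vid delta of exactly 1).
 */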

static int br_vlan_dump_dev(const struct net_device *dev,
			    struct sk_buff *skb,
			    struct netlink_callback *cb,
			    u32 dump_flags)
{
	struct net_bridge_vlan *v, *range_start = NULL, *range_end = NULL;
	bool dump_global = !!(dump_flags & BRIDGE_VLANDB_DUMPF_GLOBAL);
	bool dump_stats = !!(dump_flags & BRIDGE_VLANDB_DUMPF_STATS);
	struct net_bridge_vlan_group *vg;
	int idx = 0, s_idx = cb->args[1];
	struct nlmsghdr *nlh = NULL;
	struct net_bridge_port *p;
	struct br_vlan_msg *bvm;
	struct net_bridge *br;
	int err = 0;
	u16 pvid;

	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
		return -EINVAL;

	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
		p = NULL;
	} else {
		/* global options are dumped only for bridge devices */
		if (dump_global)
			return 0;

		p = br_port_get_rcu(dev);
		if (WARN_ON(!p))
			return -EINVAL;
		vg = nbp_vlan_group_rcu(p);
		br = p->br;
	}

	if (!vg)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RTM_NEWVLAN, sizeof(*bvm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;
	bvm = nlmsg_data(nlh);
	memset(bvm, 0, sizeof(*bvm));
	bvm->family = PF_BRIDGE;
	bvm->ifindex = dev->ifindex;
	pvid = br_get_pvid(vg);

	/* idx must stay at range's beginning until it is filled in */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!dump_global && !br_vlan_should_use(v))
			continue;
		if (idx < s_idx) {
			idx++;
			continue;
		}

		if (!range_start) {
			range_start = v;
			range_end = v;
			continue;
		}

		if (dump_global) {
			if (br_vlan_global_opts_can_enter_range(v, range_end))
				goto update_end;
			if (!br_vlan_global_opts_fill(skb, range_start->vid,
						      range_end->vid,
						      range_start)) {
				err = -EMSGSIZE;
				break;
			}
			/* advance number of filled vlans */
			idx += range_end->vid - range_start->vid + 1;

			range_start = v;
		} else if (dump_stats || v->vid == pvid ||
			   !br_vlan_can_enter_range(v, range_end)) {
			u16 vlan_flags = br_vlan_flags(range_start, pvid);

			if (!br_vlan_fill_vids(skb, range_start->vid,
					       range_end->vid, range_start,
					       vlan_flags, dump_stats)) {
				err = -EMSGSIZE;
				break;
			}
			/* advance number of filled vlans */
			idx += range_end->vid - range_start->vid + 1;

			range_start = v;
		}
update_end:
		range_end = v;
	}

	/* err will be 0 and range_start will be set in 3 cases here:
	 * - first vlan (range_start == range_end)
	 * - last vlan (range_start == range_end, not in range)
	 * - last vlan range (range_start != range_end, in range)
	 */
	if (!err && range_start) {
		if (dump_global &&
		    !br_vlan_global_opts_fill(skb, range_start->vid,
					      range_end->vid, range_start))
			err = -EMSGSIZE;
		else if (!dump_global &&
			 !br_vlan_fill_vids(skb, range_start->vid,
					    range_end->vid, range_start,
					    br_vlan_flags(range_start, pvid),
					    dump_stats))
			err = -EMSGSIZE;
	}

	cb->args[1] = err ? idx : 0;

	nlmsg_end(skb, nlh);

	return err;
}

static const struct nla_policy br_vlan_db_dump_pol[BRIDGE_VLANDB_DUMP_MAX + 1] = {
	[BRIDGE_VLANDB_DUMP_FLAGS] = { .type = NLA_U32 },
};

static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *dtb[BRIDGE_VLANDB_DUMP_MAX + 1];
	int idx = 0, err = 0, s_idx = cb->args[0];
	struct net *net = sock_net(skb->sk);
	struct br_vlan_msg *bvm;
	struct net_device *dev;
	u32 dump_flags = 0;

	err = nlmsg_parse(cb->nlh, sizeof(*bvm), dtb, BRIDGE_VLANDB_DUMP_MAX,
			  br_vlan_db_dump_pol, cb->extack);
	if (err < 0)
		return err;

	bvm = nlmsg_data(cb->nlh);
	if (dtb[BRIDGE_VLANDB_DUMP_FLAGS])
		dump_flags = nla_get_u32(dtb[BRIDGE_VLANDB_DUMP_FLAGS]);

	rcu_read_lock();
	if (bvm->ifindex) {
		dev = dev_get_by_index_rcu(net, bvm->ifindex);
		if (!dev) {
			err = -ENODEV;
			goto out_err;
		}
		err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
		/* if the dump completed without an error we return 0 here */
		if (err != -EMSGSIZE)
			goto out_err;
	} else {
		for_each_netdev_rcu(net, dev) {
			if (idx < s_idx)
				goto skip;

			err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
			if (err == -EMSGSIZE)
				break;
skip:
			idx++;
		}
	}
	cb->args[0] = idx;
	rcu_read_unlock();

	return skb->len;

out_err:
	rcu_read_unlock();

	return err;
}
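
/* A minimal userspace dump request for the handler above might look like
 * this (a sketch against the uapi in <linux/if_bridge.h>; error handling
 * omitted):
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct br_vlan_msg bvm;
 *	} req = {
 *		.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct br_vlan_msg)),
 *		.nlh.nlmsg_type  = RTM_GETVLAN,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *		.bvm.family      = PF_BRIDGE,
 *		.bvm.ifindex     = 0,	// 0 walks all bridge devices
 *	};
 *
 * An optional BRIDGE_VLANDB_DUMP_FLAGS u32 attribute (e.g.
 * BRIDGE_VLANDB_DUMPF_STATS) may be appended after the header.
 */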

static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] = {
	[BRIDGE_VLANDB_ENTRY_INFO]	=
		NLA_POLICY_EXACT_LEN(sizeof(struct bridge_vlan_info)),
	[BRIDGE_VLANDB_ENTRY_RANGE]	= { .type = NLA_U16 },
	[BRIDGE_VLANDB_ENTRY_STATE]	= { .type = NLA_U8 },
	[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = { .type = NLA_NESTED },
	[BRIDGE_VLANDB_ENTRY_MCAST_ROUTER]	= { .type = NLA_U8 },
};
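
/* Sketch of a single new/set entry as accepted by the policy above and
 * parsed by br_vlan_rtm_process_one(); attributes nest inside
 * BRIDGE_VLANDB_ENTRY:
 *
 *   BRIDGE_VLANDB_ENTRY
 *     BRIDGE_VLANDB_ENTRY_INFO   struct bridge_vlan_info { .vid = 10 }
 *     BRIDGE_VLANDB_ENTRY_RANGE  u16: 20 (optional range end)
 *     BRIDGE_VLANDB_ENTRY_STATE  u8: e.g. BR_STATE_FORWARDING (optional)
 */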

static int br_vlan_rtm_process_one(struct net_device *dev,
				   const struct nlattr *attr,
				   int cmd, struct netlink_ext_ack *extack)
{
	struct bridge_vlan_info *vinfo, vrange_end, *vinfo_last = NULL;
	struct nlattr *tb[BRIDGE_VLANDB_ENTRY_MAX + 1];
	bool changed = false, skip_processing = false;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0, cmdmap = 0;
	struct net_bridge *br;

	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (WARN_ON(!p))
			return -ENODEV;
		br = p->br;
		vg = nbp_vlan_group(p);
	}

	if (WARN_ON(!vg))
		return -ENODEV;

	err = nla_parse_nested(tb, BRIDGE_VLANDB_ENTRY_MAX, attr,
			       br_vlan_db_policy, extack);
	if (err)
		return err;

	if (!tb[BRIDGE_VLANDB_ENTRY_INFO]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry info");
		return -EINVAL;
	}
	memset(&vrange_end, 0, sizeof(vrange_end));

	vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
	if (vinfo->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
			    BRIDGE_VLAN_INFO_RANGE_END)) {
		NL_SET_ERR_MSG_MOD(extack, "Old-style vlan ranges are not allowed when using RTM vlan calls");
		return -EINVAL;
	}
	if (!br_vlan_valid_id(vinfo->vid, extack))
		return -EINVAL;

	if (tb[BRIDGE_VLANDB_ENTRY_RANGE]) {
		vrange_end.vid = nla_get_u16(tb[BRIDGE_VLANDB_ENTRY_RANGE]);
		/* validate user-provided flags without RANGE_BEGIN */
		vrange_end.flags = BRIDGE_VLAN_INFO_RANGE_END | vinfo->flags;
		vinfo->flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;

		/* vinfo_last is the range start, vinfo the range end */
		vinfo_last = vinfo;
		vinfo = &vrange_end;

		if (!br_vlan_valid_id(vinfo->vid, extack) ||
		    !br_vlan_valid_range(vinfo, vinfo_last, extack))
			return -EINVAL;
	}

	switch (cmd) {
	case RTM_NEWVLAN:
		cmdmap = RTM_SETLINK;
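		/* BRIDGE_VLAN_INFO_ONLY_OPTS: the entry only tweaks per-vlan
		 * options, so skip the vlan add/modify pass below
		 */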
		skip_processing = !!(vinfo->flags & BRIDGE_VLAN_INFO_ONLY_OPTS);
		break;
	case RTM_DELVLAN:
		cmdmap = RTM_DELLINK;
		break;
	}

	if (!skip_processing) {
		struct bridge_vlan_info *tmp_last = vinfo_last;

		/* br_process_vlan_info may overwrite vinfo_last */
		err = br_process_vlan_info(br, p, cmdmap, vinfo, &tmp_last,
					   &changed, extack);

		/* notify first if anything changed */
		if (changed)
			br_ifinfo_notify(cmdmap, br, p);

		if (err)
			return err;
	}

	/* deal with options */
	if (cmd == RTM_NEWVLAN) {
		struct net_bridge_vlan *range_start, *range_end;

		if (vinfo_last) {
			range_start = br_vlan_find(vg, vinfo_last->vid);
			range_end = br_vlan_find(vg, vinfo->vid);
		} else {
			range_start = br_vlan_find(vg, vinfo->vid);
			range_end = range_start;
		}

		err = br_vlan_process_options(br, p, range_start, range_end,
					      tb, extack);
	}

	return err;
}
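
/* Example flow for the function above (illustrative): an RTM_NEWVLAN entry
 * carrying ENTRY_INFO vid 10 plus ENTRY_RANGE 20 is rewritten into the
 * legacy pair (RANGE_BEGIN vid 10, RANGE_END vid 20), handed to
 * br_process_vlan_info(), and then vids 10..20 get their per-vlan options
 * applied in a single br_vlan_process_options() call.
 */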

static int br_vlan_rtm_process(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct br_vlan_msg *bvm;
	struct net_device *dev;
	struct nlattr *attr;
	int err, vlans = 0;
	int rem;

	/* this should validate the header and check for remaining bytes */
	err = nlmsg_parse(nlh, sizeof(*bvm), NULL, BRIDGE_VLANDB_MAX, NULL,
			  extack);
	if (err < 0)
		return err;

	bvm = nlmsg_data(nlh);
	dev = __dev_get_by_index(net, bvm->ifindex);
	if (!dev)
		return -ENODEV;

	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "The device is not a valid bridge or bridge port");
		return -EINVAL;
	}

	nlmsg_for_each_attr(attr, nlh, sizeof(*bvm), rem) {
		switch (nla_type(attr)) {
		case BRIDGE_VLANDB_ENTRY:
			err = br_vlan_rtm_process_one(dev, attr,
						      nlh->nlmsg_type,
						      extack);
			break;
		case BRIDGE_VLANDB_GLOBAL_OPTIONS:
			err = br_vlan_rtm_process_global_options(dev, attr,
								 nlh->nlmsg_type,
								 extack);
			break;
		default:
			continue;
		}

		vlans++;
		if (err)
			break;
	}
	if (!vlans) {
		NL_SET_ERR_MSG_MOD(extack, "No vlans found to process");
		err = -EINVAL;
	}

	return err;
}
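
/* One RTM_NEWVLAN/RTM_DELVLAN message may thus carry several
 * BRIDGE_VLANDB_ENTRY and BRIDGE_VLANDB_GLOBAL_OPTIONS attributes;
 * processing stops at the first failing attribute, and at least one
 * recognized attribute must be present.
 */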

void br_vlan_rtnl_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL,
			     br_vlan_rtm_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN,
			     br_vlan_rtm_process, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELVLAN,
			     br_vlan_rtm_process, NULL, 0);
}

void br_vlan_rtnl_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETVLAN);
	rtnl_unregister(PF_BRIDGE, RTM_NEWVLAN);
	rtnl_unregister(PF_BRIDGE, RTM_DELVLAN);
}