#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>

#include "br_private.h"
#include "br_private_tunnel.h"

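/* rhashtable compare callback: a return value of zero means the entry's
 * vid matches the lookup key.
 */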
static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
			      const void *ptr)
{
	const struct net_bridge_vlan *vle = ptr;
	u16 vid = *(u16 *)arg->key;

	return vle->vid != vid;
}

static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.locks_mul = 1,
	.max_size = VLAN_N_VID,
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};

static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}
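/* The write barriers below order the vlan entry setup before the pvid
 * store itself, pairing with the read side in br_get_pvid() so that
 * lockless readers never act on a half-published pvid change.
 */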
static void __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid == vid)
		return;

	smp_wmb();
	vg->pvid = vid;
}

static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return;

	smp_wmb();
	vg->pvid = 0;
}

static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan_group *vg;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	if (flags & BRIDGE_VLAN_INFO_PVID)
		__vlan_add_pvid(vg, v->vid);
	else
		__vlan_delete_pvid(vg, v->vid);

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	else
		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
}

static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  u16 vid, u16 flags)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid_begin = vid,
		.vid_end = vid,
	};
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q add.
	 */
	err = switchdev_port_obj_add(dev, &v.obj);
	if (err == -EOPNOTSUPP)
		return vlan_vid_add(dev, br->vlan_proto, vid);
	return err;
}

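/* Insert the vlan into the bridge's or port's list, keeping it sorted in
 * ascending vid order: walk backwards and link after the first entry
 * whose vid is not larger.
 */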
static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan_group *vg;
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	headp = &vg->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid < vent->vid)
			continue;
		else
			break;
	}
	list_add_rcu(&v->vlist, hpos);
}

static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}

static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  u16 vid)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid_begin = vid,
		.vid_end = vid,
	};
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q del.
	 */
	err = switchdev_port_obj_del(dev, &v.obj);
	if (err == -EOPNOTSUPP) {
		vlan_vid_del(dev, br->vlan_proto, vid);
		return 0;
	}
	return err;
}

/* Returns a master vlan, if it didn't exist it gets created. In all cases
 * a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
	}
	atomic_inc(&masterv->refcnt);

	return masterv;
}

static void br_master_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(!br_vlan_is_master(v));
	free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}

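/* Drop a reference on a master vlan; once the last reference goes away
 * the entry is unlinked and freed via RCU.
 */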
static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (atomic_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
	}
}

/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
 * 2. vlan is being added on a bridge (both master and brentry flags)
 * 3. vlan is being added on a port, but a global entry didn't exist which
 *    is being created right now (master flag set, brentry flag unset), the
 *    global entry is used for global per-vlan features, but not for filtering
 * 4. same as 3 but with both master and brentry flags set so the entry
 *    will be used for filtering in both the port and the bridge
 */
static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan *masterv = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_device *dev;
	struct net_bridge *br;
	int err;

	if (br_vlan_is_master(v)) {
		br = v->br;
		dev = br->dev;
		vg = br_vlan_group(br);
	} else {
		p = v->port;
		br = p->br;
		dev = p->dev;
		vg = nbp_vlan_group(p);
	}

	if (p) {
		/* Add VLAN to the device filter if it is supported.
		 * This ensures tagged traffic enters the bridge when
		 * promiscuous mode is disabled by br_manage_promisc().
		 */
		err = __vlan_vid_add(dev, br, v->vid, flags);
		if (err)
			goto out;

		/* need to work on the master vlan too */
		if (flags & BRIDGE_VLAN_INFO_MASTER) {
			err = br_vlan_add(br, v->vid, flags |
						      BRIDGE_VLAN_INFO_BRENTRY);
			if (err)
				goto out_filt;
		}

		masterv = br_vlan_get_master(br, v->vid);
		if (!masterv)
			goto out_filt;
		v->brvlan = masterv;
		v->stats = masterv->stats;
	}

	/* Add the dev mac and count the vlan only if it's usable */
	if (br_vlan_should_use(v)) {
		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
		if (err) {
			br_err(br, "failed insert local address into bridge forwarding table\n");
			goto out_filt;
		}
		vg->num_vlans++;
	}

	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
					    br_vlan_rht_params);
	if (err)
		goto out_fdb_insert;

	__vlan_add_list(v);
	__vlan_add_flags(v, flags);
out:
	return err;

out_fdb_insert:
	if (br_vlan_should_use(v)) {
		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
		vg->num_vlans--;
	}

out_filt:
	if (p) {
		__vlan_vid_del(dev, br, v->vid);
		if (masterv) {
			br_vlan_put_master(masterv);
			v->brvlan = NULL;
		}
	}

	goto out;
}

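/* Shared VLAN delete path for port and bridge vlans; drops the implicit
 * reference on the master vlan and frees port vlans via RCU.
 */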
static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v->vid);
		if (err)
			goto out;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	if (masterv != v) {
		vlan_tunnel_info_del(vg, v);
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		kfree_rcu(v, rcu);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}

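/* Final teardown of a vlan group; callers unlink the group pointer and
 * wait for RCU readers (synchronize_rcu) before getting here.
 */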
static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	vlan_tunnel_deinit(vg);
	kfree(vg);
}

static void __vlan_flush(struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
		__vlan_del(vlan);
}

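/* Egress path: strip or keep the vlan tag according to the entry's
 * UNTAGGED flag, update tx stats and handle egress vlan tunnels.
 */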
struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       const struct net_bridge_port *p,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id.  If the vlan id has untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* Vlan entry must be configured at this point.  The only
	 * exception is when the bridge is set in promisc mode and the
	 * packet is destined for the bridge device.  In this case
	 * pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (br->vlan_stats_enabled) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
		skb->vlan_tci = 0;

	if (p && (p->flags & BR_VLAN_TUNNEL) &&
	    br_handle_egress_vlan_tunnel(skb, v)) {
		kfree_skb(skb);
		return NULL;
	}
out:
	return skb;
}

/* Called under RCU */
static bool __allowed_ingress(const struct net_bridge *br,
			      struct net_bridge_vlan_group *vg,
			      struct sk_buff *skb, u16 *vid)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == br->vlan_proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != br->vlan_proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port.  That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port.  Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, we know that skb->vlan_tci had
			 * VLAN_TAG_PRESENT bit and its VID field was 0x000.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

		/* if stats are disabled we can avoid the lookup */
		if (!br->vlan_stats_enabled)
			return true;
	}
	v = br_vlan_find(vg, *vid);
	if (!v || !br_vlan_should_use(v))
		goto drop;

	if (br->vlan_stats_enabled) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->rx_bytes += skb->len;
		stats->rx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	return true;

drop:
	kfree_skb(skb);
	return false;
}

bool br_allowed_ingress(const struct net_bridge *br,
			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
			u16 *vid)
{
	/* If VLAN filtering is disabled on the bridge, all packets are
	 * permitted.
	 */
	if (!br->vlan_enabled) {
		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
		return true;
	}

	return __allowed_ingress(br, vg, skb, vid);
}

/* Called under RCU. */
bool br_allowed_egress(struct net_bridge_vlan_group *vg,
		       const struct sk_buff *skb)
{
	const struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		return true;

	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	if (v && br_vlan_should_use(v))
		return true;

	return false;
}

/* Called under RCU */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge *br = p->br;

	/* If filtering was disabled at input, let it pass. */
	if (!br->vlan_enabled)
		return true;

	vg = nbp_vlan_group_rcu(p);
	if (!vg || !vg->num_vlans)
		return false;

	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
		*vid = 0;

	if (!*vid) {
		*vid = br_get_pvid(vg);
		if (!*vid)
			return false;

		return true;
	}

	if (br_vlan_find(vg, *vid))
		return true;

	return false;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan) {
		if (!br_vlan_is_brentry(vlan)) {
			/* Trying to change flags of non-existent bridge vlan */
			if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
				return -EINVAL;
			/* It was only kept for port vlans, now make it real */
			ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
					    vlan->vid);
			if (ret) {
				br_err(br, "failed insert local address into bridge forwarding table\n");
				return ret;
			}
			atomic_inc(&vlan->refcnt);
			vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			vg->num_vlans++;
		}
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
	if (!vlan->stats) {
		kfree(vlan);
		return -ENOMEM;
	}
	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		atomic_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags);
	if (ret) {
		free_percpu(vlan->stats);
		kfree(vlan);
	}

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
	br_fdb_delete_by_port(br, NULL, vid, 0);

	vlan_tunnel_info_del(vg, v);

	return __vlan_del(v);
}

void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}

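/* NULL-safe lookup wrapper around the vlan hash. */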
struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (!vg)
		return NULL;

	return br_vlan_lookup(&vg->vlan_hash, vid);
}

/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
	if (br->group_addr_set)
		return;

	spin_lock_bh(&br->lock);
	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
		/* Bridge Group Address */
		br->group_addr[5] = 0x00;
	} else { /* vlan_enabled && ETH_P_8021AD */
		/* Provider Bridge Group Address */
		br->group_addr[5] = 0x08;
	}
	spin_unlock_bh(&br->lock);
}

/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
	else /* vlan_enabled && ETH_P_8021AD */
		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
					      ~(1u << br->group_addr[5]);
}

int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	if (br->vlan_enabled == val)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	br->vlan_enabled = val;
	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	return 0;
}

int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	return __br_vlan_filter_toggle(br, val);
}

bool br_vlan_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return !!br->vlan_enabled;
}
EXPORT_SYMBOL_GPL(br_vlan_enabled);

int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto;

	if (br->vlan_proto == proto)
		return 0;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	oldproto = br->vlan_proto;
	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
	}

	return 0;

err_filt:
	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
	}

	return err;
}

int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
{
	if (val != ETH_P_8021Q && val != ETH_P_8021AD)
		return -EPROTONOSUPPORT;

	return __br_vlan_set_proto(br, htons(val));
}

int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
{
	switch (val) {
	case 0:
	case 1:
		br->vlan_stats_enabled = val;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	struct net_bridge_vlan *v;

	if (vid != vg->pvid)
		return false;

	v = br_vlan_lookup(&vg->vlan_hash, vid);
	if (v && br_vlan_should_use(v) &&
	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return true;

	return false;
}

static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid))
		br_vlan_delete(br, pvid);

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
			nbp_vlan_delete(p, pvid);
	}

	br->default_pvid = 0;
}

int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	u16 old_pvid;
	int err = 0;
	unsigned long *changed;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			  GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY);
		if (err)
			goto out;
		br_vlan_delete(br, old_pvid);
		set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (err)
			goto err_port;
		nbp_vlan_delete(p, old_pvid);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	kfree(changed);
	return err;

err_port:
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid)
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED);
		nbp_vlan_delete(p, pvid);
	}

	if (test_bit(0, changed)) {
		if (old_pvid)
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY);
		br_vlan_delete(br, pvid);
	}
	goto out;
}

int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
{
	u16 pvid = val;
	int err = 0;

	if (val >= VLAN_VID_MASK)
		return -EINVAL;

	if (pvid == br->default_pvid)
		goto out;

	/* Only allow default pvid change when filtering is disabled */
	if (br->vlan_enabled) {
		pr_info_once("Please disable vlan filtering to change default_pvid\n");
		err = -EPERM;
		goto out;
	}
	err = __br_vlan_set_default_pvid(br, pvid);
out:
	return err;
}

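/* Allocate the bridge-level vlan group and pre-install vlan 1 as the
 * default pvid, untagged.
 */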
int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	rcu_assign_pointer(br->vlgrp, vg);
	ret = br_vlan_add(br, 1,
			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
			  BRIDGE_VLAN_INFO_BRENTRY);
	if (ret)
		goto err_vlan_add;

out:
	return ret;

err_vlan_add:
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}

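/* Per-port counterpart of br_vlan_init(): allocate the port's vlan group,
 * propagate the bridge's vlan_filtering state to the hardware (ignoring
 * -EOPNOTSUPP) and add the default pvid.
 */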
int nbp_vlan_init(struct net_bridge_port *p)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = p->br->vlan_enabled,
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = switchdev_port_attr_set(p->dev, &attr);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (ret)
			goto err_vlan_add;
	}
out:
	return ret;

err_vlan_add:
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
err_vlan_enabled:
	kfree(vg);

	goto out;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = port->dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid_begin = vid,
		.vid_end = vid,
	};
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		/* Pass the flags to the hardware bridge */
		ret = switchdev_port_obj_add(port->dev, &v.obj);
		if (ret && ret != -EOPNOTSUPP)
			return ret;
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags);
	if (ret)
		kfree(vlan);

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(nbp_vlan_group(port), vid);
	if (!v)
		return -ENOENT;
	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
	br_fdb_delete_by_port(port->br, port, vid, 0);

	return __vlan_del(v);
}

void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(vg);
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}

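/* Aggregate the per-cpu vlan counters into @stats; the u64_stats seqcount
 * gives readers a consistent snapshot even on 32-bit hosts.
 */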
void br_vlan_get_stats(const struct net_bridge_vlan *v,
		       struct br_vlan_stats *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		struct br_vlan_stats *cpu_stats;
		unsigned int start;

		cpu_stats = per_cpu_ptr(v->stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rxpackets = cpu_stats->rx_packets;
			rxbytes = cpu_stats->rx_bytes;
			txbytes = cpu_stats->tx_bytes;
			txpackets = cpu_stats->tx_packets;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rxpackets;
		stats->rx_bytes += rxbytes;
		stats->tx_bytes += txbytes;
		stats->tx_packets += txpackets;
	}
}