/*
 *	Bridge VLAN handling
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>

#include "br_private.h"

9 10
static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
			      const void *ptr)
11
{
12 13 14 15 16 17 18 19 20 21
	const struct net_bridge_vlan *vle = ptr;
	u16 vid = *(u16 *)arg->key;

	return vle->vid != vid;
}

static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
22 23
	.nelem_hint = 3,
	.locks_mul = 1,
24 25 26 27 28 29 30 31 32 33
	.max_size = VLAN_N_VID,
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};

/* Look up a vlan entry by vid in the given vlan hash table */
static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}

34
static void __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
35
{
36
	if (vg->pvid == vid)
37 38 39
		return;

	smp_wmb();
40
	vg->pvid = vid;
41 42
}

43
static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
44
{
45
	if (vg->pvid != vid)
46 47 48
		return;

	smp_wmb();
49
	vg->pvid = 0;
50 51
}

52
static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
53
{
54 55 56 57 58 59 60 61 62 63 64
	struct net_bridge_vlan_group *vg;

	if (br_vlan_is_master(v))
		vg = v->br->vlgrp;
	else
		vg = v->port->vlgrp;

	if (flags & BRIDGE_VLAN_INFO_PVID)
		__vlan_add_pvid(vg, v->vid);
	else
		__vlan_delete_pvid(vg, v->vid);
65 66

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
67
		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
68
	else
69
		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
70 71
}

72 73 74
static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  u16 vid, u16 flags)
{
75 76 77 78 79 80
	struct switchdev_obj_port_vlan v = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid_begin = vid,
		.vid_end = vid,
	};
81 82
	int err;

83 84
	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q add.
85
	 */
86 87 88
	err = switchdev_port_obj_add(dev, &v.obj);
	if (err == -EOPNOTSUPP)
		return vlan_vid_add(dev, br->vlan_proto, vid);
89 90 91
	return err;
}

92
static void __vlan_add_list(struct net_bridge_vlan *v)
93
{
94 95
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;
96

97 98 99 100 101 102 103 104
	headp = br_vlan_is_master(v) ? &v->br->vlgrp->vlan_list :
				       &v->port->vlgrp->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid < vent->vid)
			continue;
		else
			break;
105
	}
106
	list_add_rcu(&v->vlist, hpos);
107
}
108

109 110
static void __vlan_del_list(struct net_bridge_vlan *v)
{
111
	list_del_rcu(&v->vlist);
112 113
}

114 115
static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  u16 vid)
116
{
117 118 119 120 121 122
	struct switchdev_obj_port_vlan v = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid_begin = vid,
		.vid_end = vid,
	};
	int err;
123

124 125
	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q del.
126
	 */
127 128
	err = switchdev_port_obj_del(dev, &v.obj);
	if (err == -EOPNOTSUPP) {
129
		vlan_vid_del(dev, br->vlan_proto, vid);
130
		return 0;
131
	}
132
	return err;
133 134
}

135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168
/* Returns a master vlan, if it didn't exist it gets created. In all cases a
 * a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan *masterv;

	masterv = br_vlan_find(br->vlgrp, vid);
	if (!masterv) {
		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0))
			return NULL;
		masterv = br_vlan_find(br->vlgrp, vid);
		if (WARN_ON(!masterv))
			return NULL;
	}
	atomic_inc(&masterv->refcnt);

	return masterv;
}

/* Drop a reference on a master vlan entry; when the last reference goes
 * away the entry is unhashed, unlinked and freed after an RCU grace period.
 */
static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	if (!br_vlan_is_master(masterv))
		return;

	if (!atomic_dec_and_test(&masterv->refcnt))
		return;

	rhashtable_remove_fast(&masterv->br->vlgrp->vlan_hash,
			       &masterv->vnode, br_vlan_rht_params);
	__vlan_del_list(masterv);
	kfree_rcu(masterv, rcu);
}

169 170 171 172 173 174 175 176 177 178 179 180
/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
 * 2. vlan is being added on a bridge (both master and brvlan flags)
 * 3. vlan is being added on a port, but a global entry didn't exist which
 *    is being created right now (master flag set, brvlan flag unset), the
 *    global entry is used for global per-vlan features, but not for filtering
 * 4. same as 3 but with both master and brvlan flags set so the entry
 *    will be used for filtering in both the port and the bridge
 */
static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
181
{
182 183
	struct net_bridge_vlan *masterv = NULL;
	struct net_bridge_port *p = NULL;
184
	struct net_bridge_vlan_group *vg;
185 186 187 188 189 190 191
	struct net_device *dev;
	struct net_bridge *br;
	int err;

	if (br_vlan_is_master(v)) {
		br = v->br;
		dev = br->dev;
192
		vg = br->vlgrp;
193 194 195 196
	} else {
		p = v->port;
		br = p->br;
		dev = p->dev;
197
		vg = p->vlgrp;
198 199 200 201 202 203 204 205 206 207 208 209 210
	}

	if (p) {
		/* Add VLAN to the device filter if it is supported.
		 * This ensures tagged traffic enters the bridge when
		 * promiscuous mode is disabled by br_manage_promisc().
		 */
		err = __vlan_vid_add(dev, br, v->vid, flags);
		if (err)
			goto out;

		/* need to work on the master vlan too */
		if (flags & BRIDGE_VLAN_INFO_MASTER) {
211 212
			err = br_vlan_add(br, v->vid, flags |
						      BRIDGE_VLAN_INFO_BRENTRY);
213 214 215 216
			if (err)
				goto out_filt;
		}

217 218 219
		masterv = br_vlan_get_master(br, v->vid);
		if (!masterv)
			goto out_filt;
220 221 222
		v->brvlan = masterv;
	}

223
	/* Add the dev mac and count the vlan only if it's usable */
224 225 226 227 228 229
	if (br_vlan_should_use(v)) {
		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
		if (err) {
			br_err(br, "failed insert local address into bridge forwarding table\n");
			goto out_filt;
		}
230
		vg->num_vlans++;
231 232
	}

233 234
	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
					    br_vlan_rht_params);
235 236
	if (err)
		goto out_fdb_insert;
237

238 239 240 241 242 243
	__vlan_add_list(v);
	__vlan_add_flags(v, flags);
out:
	return err;

out_fdb_insert:
244 245 246 247
	if (br_vlan_should_use(v)) {
		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
		vg->num_vlans--;
	}
248 249 250 251 252

out_filt:
	if (p) {
		__vlan_vid_del(dev, br, v->vid);
		if (masterv) {
253
			br_vlan_put_master(masterv);
254 255 256 257 258 259 260 261 262 263
			v->brvlan = NULL;
		}
	}

	goto out;
}

static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
264
	struct net_bridge_vlan_group *vg;
265 266
	struct net_bridge_port *p = NULL;
	int err = 0;
267

268
	if (br_vlan_is_master(v)) {
269
		vg = v->br->vlgrp;
270 271
	} else {
		p = v->port;
272
		vg = v->port->vlgrp;
273 274
		masterv = v->brvlan;
	}
275

276
	__vlan_delete_pvid(vg, v->vid);
277 278
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v->vid);
279
		if (err)
280
			goto out;
281
	}
282

283 284 285
	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
286 287 288
	}

	if (masterv != v) {
289 290
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
291
		__vlan_del_list(v);
292 293
		kfree_rcu(v, rcu);
	}
294

295
	br_vlan_put_master(masterv);
296 297
out:
	return err;
298 299
}

300
static void __vlan_flush(struct net_bridge_vlan_group *vlgrp)
301
{
302 303
	struct net_bridge_vlan *vlan, *tmp;

304
	__vlan_delete_pvid(vlgrp, vlgrp->pvid);
305 306 307 308
	list_for_each_entry_safe(vlan, tmp, &vlgrp->vlan_list, vlist)
		__vlan_del(vlan);
	rhashtable_destroy(&vlgrp->vlan_hash);
	kfree(vlgrp);
309 310
}

311
struct sk_buff *br_handle_vlan(struct net_bridge *br,
312
			       struct net_bridge_vlan_group *vg,
313
			       struct sk_buff *skb)
314
{
315
	struct net_bridge_vlan *v;
316 317
	u16 vid;

318 319
	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
320 321
		goto out;

322 323 324 325 326 327 328
	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id.  If the vlan id has untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* Vlan entry must be configured at this point.  The
329 330 331 332
	 * only exception is the bridge is set in promisc mode and the
	 * packet is destined for the bridge device.  In this case
	 * pass the packet as is.
	 */
333
	if (!v || !br_vlan_should_use(v)) {
334 335 336 337 338 339 340
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
341
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
342
		skb->vlan_tci = 0;
343 344 345 346 347 348

out:
	return skb;
}

/* Called under RCU */
349
static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto,
350
			      struct sk_buff *skb, u16 *vid)
351
{
352
	const struct net_bridge_vlan *v;
353
	bool tagged;
354

355
	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
356 357 358 359
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
360
	if (unlikely(!skb_vlan_tag_present(skb) &&
361
		     skb->protocol == proto)) {
362
		skb = skb_vlan_untag(skb);
363 364 365 366
		if (unlikely(!skb))
			return false;
	}

367 368 369 370 371
	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
372
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
373
							skb_vlan_tag_get(skb));
374 375 376 377 378 379 380 381 382 383 384 385 386 387 388
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

389
	if (!*vid) {
390 391
		u16 pvid = br_get_pvid(vg);

392 393 394
		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port.  That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
395
		 */
V
Vlad Yasevich 已提交
396
		if (!pvid)
397
			goto drop;
398

399 400
		/* PVID is set on this port.  Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
401
		 */
402
		*vid = pvid;
403
		if (likely(!tagged))
404
			/* Untagged Frame. */
405
			__vlan_hwaccel_put_tag(skb, proto, pvid);
406 407 408 409 410 411 412 413
		else
			/* Priority-tagged Frame.
			 * At this point, We know that skb->vlan_tci had
			 * VLAN_TAG_PRESENT bit and its VID field was 0x000.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

414 415 416 417
		return true;
	}

	/* Frame had a valid vlan tag.  See if vlan is allowed */
418
	v = br_vlan_find(vg, *vid);
419
	if (v && br_vlan_should_use(v))
420
		return true;
421 422
drop:
	kfree_skb(skb);
423 424 425
	return false;
}

426 427 428
bool br_allowed_ingress(const struct net_bridge *br,
			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
			u16 *vid)
429 430 431 432 433 434 435 436 437
{
	/* If VLAN filtering is disabled on the bridge, all packets are
	 * permitted.
	 */
	if (!br->vlan_enabled) {
		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
		return true;
	}

438
	return __allowed_ingress(vg, br->vlan_proto, skb, vid);
439 440
}

441
/* Called under RCU. */
442
bool br_allowed_egress(struct net_bridge_vlan_group *vg,
443 444
		       const struct sk_buff *skb)
{
445
	const struct net_bridge_vlan *v;
446 447
	u16 vid;

448 449
	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
450 451 452
		return true;

	br_vlan_get_tag(skb, &vid);
453 454
	v = br_vlan_find(vg, vid);
	if (v && br_vlan_should_use(v))
455 456 457 458 459
		return true;

	return false;
}

460 461 462
/* Called under RCU */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
463
	struct net_bridge_vlan_group *vg;
464 465
	struct net_bridge *br = p->br;

466
	/* If filtering was disabled at input, let it pass. */
467
	if (!br->vlan_enabled)
468 469
		return true;

470 471
	vg = p->vlgrp;
	if (!vg || !vg->num_vlans)
472 473
		return false;

474 475 476
	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
		*vid = 0;

477
	if (!*vid) {
478
		*vid = br_get_pvid(vg);
V
Vlad Yasevich 已提交
479
		if (!*vid)
480 481 482 483 484
			return false;

		return true;
	}

485
	if (br_vlan_find(vg, *vid))
486 487 488 489 490
		return true;

	return false;
}

491 492 493
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
494
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
495
{
496 497
	struct net_bridge_vlan *vlan;
	int ret;
498 499 500

	ASSERT_RTNL();

501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520
	vlan = br_vlan_find(br->vlgrp, vid);
	if (vlan) {
		if (!br_vlan_is_brentry(vlan)) {
			/* Trying to change flags of non-existent bridge vlan */
			if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
				return -EINVAL;
			/* It was only kept for port vlans, now make it real */
			ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
					    vlan->vid);
			if (ret) {
				br_err(br, "failed insert local address into bridge forwarding table\n");
				return ret;
			}
			atomic_inc(&vlan->refcnt);
			vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			br->vlgrp->num_vlans++;
		}
		__vlan_add_flags(vlan, flags);
		return 0;
	}
521

522 523
	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
524 525
		return -ENOMEM;

526 527 528 529 530 531 532 533 534
	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		atomic_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags);
	if (ret)
		kfree(vlan);
535

536
	return ret;
537 538
}

539 540 541
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
542 543
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
544
	struct net_bridge_vlan *v;
545 546 547

	ASSERT_RTNL();

548 549 550
	v = br_vlan_find(br->vlgrp, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;
551

552
	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
553
	br_fdb_delete_by_port(br, NULL, vid, 0);
554

555
	return __vlan_del(v);
556 557 558 559 560 561
}

/* Remove all vlans configured on the bridge device itself */
void br_vlan_flush(struct net_bridge *br)
{
	ASSERT_RTNL();

	__vlan_flush(br_vlan_group(br));
}

565
struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
566
{
567 568
	if (!vg)
		return NULL;
569

570
	return br_vlan_lookup(&vg->vlan_hash, vid);
571 572
}

573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599
/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
	if (br->group_addr_set)
		return;

	spin_lock_bh(&br->lock);
	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
		/* Bridge Group Address */
		br->group_addr[5] = 0x00;
	} else { /* vlan_enabled && ETH_P_8021AD */
		/* Provider Bridge Group Address */
		br->group_addr[5] = 0x08;
	}
	spin_unlock_bh(&br->lock);
}

/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
	if (br->vlan_enabled && br->vlan_proto != htons(ETH_P_8021Q))
		/* vlan_enabled && ETH_P_8021AD */
		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
					      ~(1u << br->group_addr[5]);
	else
		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
}

600
int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
601 602
{
	if (br->vlan_enabled == val)
603
		return 0;
604 605

	br->vlan_enabled = val;
606
	br_manage_promisc(br);
607 608
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);
609

610 611 612 613 614 615 616 617 618
	return 0;
}

/* sysfs/netlink entry point for toggling vlan filtering */
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	if (!rtnl_trylock())
		return restart_syscall();

	__br_vlan_filter_toggle(br, val);
	rtnl_unlock();

	return 0;
}

624
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
625 626 627
{
	int err = 0;
	struct net_bridge_port *p;
628
	struct net_bridge_vlan *vlan;
629
	__be16 oldproto;
630 631

	if (br->vlan_proto == proto)
632
		return 0;
633 634 635

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
636 637
		list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
638 639 640 641 642 643 644 645 646 647 648 649
			if (err)
				goto err_filt;
		}
	}

	oldproto = br->vlan_proto;
	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
650 651 652
	list_for_each_entry(p, &br->port_list, list)
		list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
653

654
	return 0;
655 656

err_filt:
657 658
	list_for_each_entry_continue_reverse(vlan, &p->vlgrp->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);
659

660 661 662
	list_for_each_entry_continue_reverse(p, &br->port_list, list)
		list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
663

664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680
	return err;
}

/* sysfs/netlink entry point for changing the vlan protocol */
int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
{
	int err;

	/* only 802.1Q and 802.1ad are supported */
	if (val != ETH_P_8021Q && val != ETH_P_8021AD)
		return -EPROTONOSUPPORT;

	if (!rtnl_trylock())
		return restart_syscall();

	err = __br_vlan_set_proto(br, htons(val));
	rtnl_unlock();

	return err;
}

683
static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
684
{
685 686
	struct net_bridge_vlan *v;

687
	if (vid != vg->pvid)
688 689 690 691 692 693 694 695
		return false;

	v = br_vlan_lookup(&vg->vlan_hash, vid);
	if (v && br_vlan_should_use(v) &&
	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return true;

	return false;
696 697 698 699 700 701 702 703 704 705
}

/* Turn off the default pvid, deleting it from the bridge and every port
 * where the user hasn't overridden it.
 */
static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br->vlgrp, pvid))
		br_vlan_delete(br, pvid);

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(p->vlgrp, pvid))
			nbp_vlan_delete(p, pvid);
	}

	br->default_pvid = 0;
}

717
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
718
{
719
	const struct net_bridge_vlan *pvent;
720 721 722 723 724
	struct net_bridge_port *p;
	u16 old_pvid;
	int err = 0;
	unsigned long *changed;

725 726 727 728 729
	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

730 731 732 733 734 735 736 737 738 739
	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			  GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
740
	pvent = br_vlan_find(br->vlgrp, pvid);
741
	if ((!old_pvid || vlan_default_pvid(br->vlgrp, old_pvid)) &&
742
	    (!pvent || !br_vlan_should_use(pvent))) {
743 744
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
745 746
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY);
747 748 749 750 751 752 753 754 755 756 757
		if (err)
			goto out;
		br_vlan_delete(br, old_pvid);
		set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		if ((old_pvid &&
758
		     !vlan_default_pvid(p->vlgrp, old_pvid)) ||
759
		    br_vlan_find(p->vlgrp, pvid))
760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (err)
			goto err_port;
		nbp_vlan_delete(p, old_pvid);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	kfree(changed);
	return err;

err_port:
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid)
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED);
		nbp_vlan_delete(p, pvid);
	}

	if (test_bit(0, changed)) {
		if (old_pvid)
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
793 794
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY);
795 796 797 798 799
		br_vlan_delete(br, pvid);
	}
	goto out;
}

800 801 802 803 804
int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
{
	u16 pvid = val;
	int err = 0;

805
	if (val >= VLAN_VID_MASK)
806 807 808 809 810 811 812 813 814 815 816 817 818 819
		return -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (pvid == br->default_pvid)
		goto unlock;

	/* Only allow default pvid change when filtering is disabled */
	if (br->vlan_enabled) {
		pr_info_once("Please disable vlan filtering to change default_pvid\n");
		err = -EPERM;
		goto unlock;
	}
820
	err = __br_vlan_set_default_pvid(br, pvid);
821 822 823 824 825
unlock:
	rtnl_unlock();
	return err;
}

826
int br_vlan_init(struct net_bridge *br)
827
{
828 829 830 831 832 833 834 835 836
	int ret = -ENOMEM;

	br->vlgrp = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!br->vlgrp)
		goto out;
	ret = rhashtable_init(&br->vlgrp->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	INIT_LIST_HEAD(&br->vlgrp->vlan_list);
837
	br->vlan_proto = htons(ETH_P_8021Q);
838
	br->default_pvid = 1;
839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857
	ret = br_vlan_add(br, 1,
			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
			  BRIDGE_VLAN_INFO_BRENTRY);
	if (ret)
		goto err_vlan_add;

out:
	return ret;

err_vlan_add:
	rhashtable_destroy(&br->vlgrp->vlan_hash);
err_rhtbl:
	kfree(br->vlgrp);

	goto out;
}

int nbp_vlan_init(struct net_bridge_port *p)
{
858
	struct net_bridge_vlan_group *vg;
859 860
	int ret = -ENOMEM;

861 862
	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
863 864
		goto out;

865
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
866 867
	if (ret)
		goto err_rhtbl;
868 869 870 871
	INIT_LIST_HEAD(&vg->vlan_list);
	/* Make sure everything's committed before publishing vg */
	smp_wmb();
	p->vlgrp = vg;
872 873 874 875 876 877 878 879 880 881 882
	if (p->br->default_pvid) {
		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (ret)
			goto err_vlan_add;
	}
out:
	return ret;

err_vlan_add:
883
	rhashtable_destroy(&vg->vlan_hash);
884
err_rhtbl:
885
	kfree(vg);
886 887

	goto out;
888 889
}

890 891 892
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
893
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
894
{
895 896
	struct net_bridge_vlan *vlan;
	int ret;
897 898 899

	ASSERT_RTNL();

900 901 902 903
	vlan = br_vlan_find(port->vlgrp, vid);
	if (vlan) {
		__vlan_add_flags(vlan, flags);
		return 0;
904 905
	}

906 907 908
	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;
909

910 911 912 913 914
	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags);
	if (ret)
		kfree(vlan);
915

916
	return ret;
917 918
}

919 920 921
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
922 923
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
924
	struct net_bridge_vlan *v;
925 926 927

	ASSERT_RTNL();

928 929 930
	v = br_vlan_find(port->vlgrp, vid);
	if (!v)
		return -ENOENT;
931
	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
932
	br_fdb_delete_by_port(port->br, port, vid, 0);
933

934
	return __vlan_del(v);
935 936 937 938
}

void nbp_vlan_flush(struct net_bridge_port *port)
{
939
	struct net_bridge_vlan *vlan;
940 941 942

	ASSERT_RTNL();

943 944
	list_for_each_entry(vlan, &port->vlgrp->vlan_list, vlist)
		vlan_vid_del(port->dev, port->br->vlan_proto, vlan->vid);
945

946
	__vlan_flush(nbp_vlan_group(port));
947
}