/*
 *	Bridge VLAN handling
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>

#include "br_private.h"

static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
			      const void *ptr)
11
{
12 13 14 15 16 17 18 19 20 21
	const struct net_bridge_vlan *vle = ptr;
	u16 vid = *(u16 *)arg->key;

	return vle->vid != vid;
}

/* Hash table parameters for the per-bridge and per-port VLAN tables,
 * keyed by the VLAN id stored in net_bridge_vlan::vid.
 */
static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.locks_mul = 1,
	.max_size = VLAN_N_VID,	/* there can never be more than 4096 vids */
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};

/* Look up a VLAN entry by vid in the given hash table. */
static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}

/* Make @vid the pvid of the VLAN group, if it isn't already.
 * The write barrier orders the vlan entry setup before publishing the
 * new pvid value to lockless readers.
 */
static void __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid == vid)
		return;

	smp_wmb();
	vg->pvid = vid;
}

/* Clear the group's pvid, but only if it is currently @vid.
 * The write barrier mirrors the one in __vlan_add_pvid().
 */
static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return;

	smp_wmb();
	vg->pvid = 0;
}

52
static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
53
{
54 55 56
	struct net_bridge_vlan_group *vg;

	if (br_vlan_is_master(v))
57
		vg = br_vlan_group(v->br);
58
	else
59
		vg = nbp_vlan_group(v->port);
60 61 62 63 64

	if (flags & BRIDGE_VLAN_INFO_PVID)
		__vlan_add_pvid(vg, v->vid);
	else
		__vlan_delete_pvid(vg, v->vid);
65 66

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
67
		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
68
	else
69
		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
70 71
}

/* Install @vid in the device's VLAN filter. The switchdev operation is
 * preferred; drivers without switchdev support fall back to the 8021q
 * per-device filter.
 */
static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  u16 vid, u16 flags)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid_begin = vid,
		.vid_end = vid,
	};
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q add.
	 */
	err = switchdev_port_obj_add(dev, &v.obj);
	if (err == -EOPNOTSUPP)
		return vlan_vid_add(dev, br->vlan_proto, vid);
	return err;
}

93
static void __vlan_add_list(struct net_bridge_vlan *v)
94
{
95
	struct net_bridge_vlan_group *vg;
96 97
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;
98

99 100 101 102 103 104
	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	headp = &vg->vlan_list;
105 106 107 108 109 110
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid < vent->vid)
			continue;
		else
			break;
111
	}
112
	list_add_rcu(&v->vlist, hpos);
113
}
114

/* Unlink @v from its group's sorted vlan_list (RCU-safe removal). */
static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}

/* Remove @vid from the device's VLAN filter, mirroring __vlan_vid_add():
 * try switchdev first and fall back to the 8021q interface.
 */
static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  u16 vid)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid_begin = vid,
		.vid_end = vid,
	};
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q del.
	 */
	err = switchdev_port_obj_del(dev, &v.obj);
	if (err == -EOPNOTSUPP) {
		vlan_vid_del(dev, br->vlan_proto, vid);
		return 0;
	}
	return err;
}

/* Returns a master vlan, if it didn't exist it gets created. In all cases
 * a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		/* we just created it above, it has to be there */
		if (WARN_ON(!masterv))
			return NULL;
	}
	atomic_inc(&masterv->refcnt);

	return masterv;
}

165 166 167 168 169 170 171 172 173 174 175
static void br_master_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(!br_vlan_is_master(v));
	free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}

/* Drop a reference on a master vlan; on the last put the entry is removed
 * from the bridge's VLAN group and freed after an RCU grace period.
 */
static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (atomic_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
	}
}

192 193 194 195
/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
196
 * 2. vlan is being added on a bridge (both master and brentry flags)
197
 * 3. vlan is being added on a port, but a global entry didn't exist which
198
 *    is being created right now (master flag set, brentry flag unset), the
199
 *    global entry is used for global per-vlan features, but not for filtering
200
 * 4. same as 3 but with both master and brentry flags set so the entry
201 202 203
 *    will be used for filtering in both the port and the bridge
 */
static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
204
{
205 206
	struct net_bridge_vlan *masterv = NULL;
	struct net_bridge_port *p = NULL;
207
	struct net_bridge_vlan_group *vg;
208 209 210 211 212 213 214
	struct net_device *dev;
	struct net_bridge *br;
	int err;

	if (br_vlan_is_master(v)) {
		br = v->br;
		dev = br->dev;
215
		vg = br_vlan_group(br);
216 217 218 219
	} else {
		p = v->port;
		br = p->br;
		dev = p->dev;
220
		vg = nbp_vlan_group(p);
221 222 223 224 225 226 227 228 229 230 231 232 233
	}

	if (p) {
		/* Add VLAN to the device filter if it is supported.
		 * This ensures tagged traffic enters the bridge when
		 * promiscuous mode is disabled by br_manage_promisc().
		 */
		err = __vlan_vid_add(dev, br, v->vid, flags);
		if (err)
			goto out;

		/* need to work on the master vlan too */
		if (flags & BRIDGE_VLAN_INFO_MASTER) {
234 235
			err = br_vlan_add(br, v->vid, flags |
						      BRIDGE_VLAN_INFO_BRENTRY);
236 237 238 239
			if (err)
				goto out_filt;
		}

240 241 242
		masterv = br_vlan_get_master(br, v->vid);
		if (!masterv)
			goto out_filt;
243
		v->brvlan = masterv;
244
		v->stats = masterv->stats;
245 246
	}

247
	/* Add the dev mac and count the vlan only if it's usable */
248 249 250 251 252 253
	if (br_vlan_should_use(v)) {
		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
		if (err) {
			br_err(br, "failed insert local address into bridge forwarding table\n");
			goto out_filt;
		}
254
		vg->num_vlans++;
255 256
	}

257 258
	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
					    br_vlan_rht_params);
259 260
	if (err)
		goto out_fdb_insert;
261

262 263 264 265 266 267
	__vlan_add_list(v);
	__vlan_add_flags(v, flags);
out:
	return err;

out_fdb_insert:
268 269 270 271
	if (br_vlan_should_use(v)) {
		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
		vg->num_vlans--;
	}
272 273 274 275 276

out_filt:
	if (p) {
		__vlan_vid_del(dev, br, v->vid);
		if (masterv) {
277
			br_vlan_put_master(masterv);
278 279 280 281 282 283 284 285 286 287
			v->brvlan = NULL;
		}
	}

	goto out;
}

/* Shared VLAN delete for both port and bridge entries; undoes __vlan_add().
 * Port entries additionally drop their reference on the master vlan.
 */
static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		/* remove the vid from the device filter as well */
		err = __vlan_vid_del(p->dev, p->br, v->vid);
		if (err)
			goto out;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	/* master entries are freed via br_vlan_put_master() below */
	if (masterv != v) {
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		kfree_rcu(v, rcu);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}

/* Free a VLAN group; it must already be empty and unpublished. */
static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	kfree(vg);
}

/* Delete every VLAN in the group, clearing the pvid first. */
static void __vlan_flush(struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
		__vlan_del(vlan);
}

/* Prepare an skb for egress on a vlan-aware bridge: account tx stats and
 * strip the tag for vlans marked untagged on this egress group.
 * Returns the skb, or NULL if it was dropped.
 */
struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id.  If the vlan id has untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* Vlan entry must be configured at this point.  The
	 * only exception is the bridge is set in promisc mode and the
	 * packet is destined for the bridge device.  In this case
	 * pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (br->vlan_stats_enabled) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
		skb->vlan_tci = 0;
out:
	return skb;
}

/* Validate an ingress frame against the VLAN configuration: normalize the
 * tag, map untagged/priority-tagged traffic onto the pvid, account rx stats
 * and decide accept/drop.  On drop the skb is freed and false is returned.
 * Called under RCU.
 */
static bool __allowed_ingress(const struct net_bridge *br,
			      struct net_bridge_vlan_group *vg,
			      struct sk_buff *skb, u16 *vid)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == br->vlan_proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != br->vlan_proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port.  That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port.  Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, We know that skb->vlan_tci had
			 * VLAN_TAG_PRESENT bit and its VID field was 0x000.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

		/* if stats are disabled we can avoid the lookup */
		if (!br->vlan_stats_enabled)
			return true;
	}
	v = br_vlan_find(vg, *vid);
	if (!v || !br_vlan_should_use(v))
		goto drop;

	if (br->vlan_stats_enabled) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->rx_bytes += skb->len;
		stats->rx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	return true;

drop:
	kfree_skb(skb);
	return false;
}

476 477 478
bool br_allowed_ingress(const struct net_bridge *br,
			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
			u16 *vid)
479 480 481 482 483 484 485 486 487
{
	/* If VLAN filtering is disabled on the bridge, all packets are
	 * permitted.
	 */
	if (!br->vlan_enabled) {
		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
		return true;
	}

488
	return __allowed_ingress(br, vg, skb, vid);
489 490
}

491
/* Called under RCU. */
492
bool br_allowed_egress(struct net_bridge_vlan_group *vg,
493 494
		       const struct sk_buff *skb)
{
495
	const struct net_bridge_vlan *v;
496 497
	u16 vid;

498 499
	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
500 501 502
		return true;

	br_vlan_get_tag(skb, &vid);
503 504
	v = br_vlan_find(vg, vid);
	if (v && br_vlan_should_use(v))
505 506 507 508 509
		return true;

	return false;
}

510 511 512
/* Called under RCU */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
513
	struct net_bridge_vlan_group *vg;
514 515
	struct net_bridge *br = p->br;

516
	/* If filtering was disabled at input, let it pass. */
517
	if (!br->vlan_enabled)
518 519
		return true;

520
	vg = nbp_vlan_group_rcu(p);
521
	if (!vg || !vg->num_vlans)
522 523
		return false;

524 525 526
	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
		*vid = 0;

527
	if (!*vid) {
528
		*vid = br_get_pvid(vg);
V
Vlad Yasevich 已提交
529
		if (!*vid)
530 531 532 533 534
			return false;

		return true;
	}

535
	if (br_vlan_find(vg, *vid))
536 537 538 539 540
		return true;

	return false;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan) {
		if (!br_vlan_is_brentry(vlan)) {
			/* Trying to change flags of non-existent bridge vlan */
			if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
				return -EINVAL;
			/* It was only kept for port vlans, now make it real */
			ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
					    vlan->vid);
			if (ret) {
				br_err(br, "failed insert local address into bridge forwarding table\n");
				return ret;
			}
			/* the brentry existence counts as one reference */
			atomic_inc(&vlan->refcnt);
			vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			vg->num_vlans++;
		}
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
	if (!vlan->stats) {
		kfree(vlan);
		return -ENOMEM;
	}
	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		atomic_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags);
	if (ret) {
		free_percpu(vlan->stats);
		kfree(vlan);
	}

	return ret;
}

598 599 600
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
601 602
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
603
	struct net_bridge_vlan_group *vg;
604
	struct net_bridge_vlan *v;
605 606 607

	ASSERT_RTNL();

608 609
	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
610 611
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;
612

613
	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
614
	br_fdb_delete_by_port(br, NULL, vid, 0);
615

616
	return __vlan_del(v);
617 618 619 620
}

/* Tear down the bridge's VLAN group on bridge destruction. */
void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(vg);
	/* unpublish the group and wait for readers before freeing it */
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}

632
struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
633
{
634 635
	if (!vg)
		return NULL;
636

637
	return br_vlan_lookup(&vg->vlan_hash, vid);
638 639
}

640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666
/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
	if (br->group_addr_set)
		return;

	spin_lock_bh(&br->lock);
	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
		/* Bridge Group Address */
		br->group_addr[5] = 0x00;
	} else { /* vlan_enabled && ETH_P_8021AD */
		/* Provider Bridge Group Address */
		br->group_addr[5] = 0x08;
	}
	spin_unlock_bh(&br->lock);
}

/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
	else /* vlan_enabled && ETH_P_8021AD */
		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
					      ~(1u << br->group_addr[5]);
}

/* Switch VLAN filtering on/off. Switchdev is notified first so drivers can
 * veto the change; -EOPNOTSUPP from drivers without support is ignored.
 */
int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	if (br->vlan_enabled == val)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	br->vlan_enabled = val;
	/* filtering affects promiscuity, group address and fwd mask */
	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	return 0;
}

/* Sysfs/netlink entry point for toggling vlan_filtering. */
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	return __br_vlan_filter_toggle(br, val);
}

/* Change the bridge's VLAN protocol (802.1Q <-> 802.1ad).  The new-proto
 * filter entries are added on all ports first so a failure can be fully
 * rolled back before the old entries are removed.
 */
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto;

	if (br->vlan_proto == proto)
		return 0;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	oldproto = br->vlan_proto;
	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
	}

	return 0;

err_filt:
	/* undo the partially processed port, then all fully processed ones */
	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
	}

	return err;
}

/* Netlink/sysfs entry point: only 802.1Q and 802.1ad are accepted. */
int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
{
	switch (val) {
	case ETH_P_8021Q:
	case ETH_P_8021AD:
		return __br_vlan_set_proto(br, htons(val));
	default:
		return -EPROTONOSUPPORT;
	}
}

754 755 756 757 758 759 760 761 762 763 764 765 766 767
int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
{
	switch (val) {
	case 0:
	case 1:
		br->vlan_stats_enabled = val;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

768
static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
769
{
770 771
	struct net_bridge_vlan *v;

772
	if (vid != vg->pvid)
773 774 775 776 777 778 779 780
		return false;

	v = br_vlan_lookup(&vg->vlan_hash, vid);
	if (v && br_vlan_should_use(v) &&
	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return true;

	return false;
781 782 783 784 785 786 787 788 789 790
}

/* Remove the automatically created default pvid vlan from the bridge and
 * all ports, unless the user has reconfigured it, then clear default_pvid.
 */
static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid))
		br_vlan_delete(br, pvid);

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
			nbp_vlan_delete(p, pvid);
	}

	br->default_pvid = 0;
}

/* Move the default pvid to @pvid on the bridge and every port whose pvid
 * still matches the old default.  The @changed bitmap records which
 * entities were modified (bit 0 = the bridge itself, bit N = port N) so a
 * mid-way failure can be rolled back.
 */
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	u16 old_pvid;
	int err = 0;
	unsigned long *changed;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			  GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY);
		if (err)
			goto out;
		br_vlan_delete(br, old_pvid);
		set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (err)
			goto err_port;
		nbp_vlan_delete(p, old_pvid);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	kfree(changed);
	return err;

err_port:
	/* restore the old pvid on every port we had already switched */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid)
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED);
		nbp_vlan_delete(p, pvid);
	}

	if (test_bit(0, changed)) {
		if (old_pvid)
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY);
		br_vlan_delete(br, pvid);
	}
	goto out;
}

/* Set the bridge's default pvid from user input; only allowed while VLAN
 * filtering is disabled.
 */
int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
{
	u16 pvid = val;
	int err = 0;

	if (val >= VLAN_VID_MASK)
		return -EINVAL;

	if (pvid == br->default_pvid)
		goto out;

	/* Only allow default pvid change when filtering is disabled */
	if (br->vlan_enabled) {
		pr_info_once("Please disable vlan filtering to change default_pvid\n");
		err = -EPERM;
		goto out;
	}
	err = __br_vlan_set_default_pvid(br, pvid);
out:
	return err;
}

/* Initialize the bridge's VLAN group and install VLAN 1 as the default
 * pvid/untagged bridge entry.
 */
int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	rcu_assign_pointer(br->vlgrp, vg);
	ret = br_vlan_add(br, 1,
			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
			  BRIDGE_VLAN_INFO_BRENTRY);
	if (ret)
		goto err_vlan_add;

out:
	return ret;

err_vlan_add:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}

/* Initialize a port's VLAN group, propagate the bridge's vlan_filtering
 * state to the port driver and install the default pvid on the port.
 */
int nbp_vlan_init(struct net_bridge_port *p)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = p->br->vlan_enabled,
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = switchdev_port_attr_set(p->dev, &attr);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	INIT_LIST_HEAD(&vg->vlan_list);
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (ret)
			goto err_vlan_add;
	}
out:
	return ret;

err_vlan_add:
	/* the group was already published, unpublish before destroying it */
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	rhashtable_destroy(&vg->vlan_hash);
err_vlan_enabled:
err_rhtbl:
	kfree(vg);

	goto out;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = port->dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid_begin = vid,
		.vid_end = vid,
	};
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		/* Pass the flags to the hardware bridge */
		ret = switchdev_port_obj_add(port->dev, &v.obj);
		if (ret && ret != -EOPNOTSUPP)
			return ret;
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	/* no stats allocation here: port vlans use the master's counters */
	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags);
	if (ret)
		kfree(vlan);

	return ret;
}

1027 1028 1029
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
1030 1031
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
1032
	struct net_bridge_vlan *v;
1033 1034 1035

	ASSERT_RTNL();

1036
	v = br_vlan_find(nbp_vlan_group(port), vid);
1037 1038
	if (!v)
		return -ENOENT;
1039
	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
1040
	br_fdb_delete_by_port(port->br, port, vid, 0);
1041

1042
	return __vlan_del(v);
1043 1044 1045 1046
}

/* Tear down the port's VLAN group on port removal. */
void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(vg);
	/* unpublish the group and wait for readers before freeing it */
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}

/* Sum the per-cpu counters of @v into @stats, using the u64_stats
 * retry loop for a consistent snapshot on 32-bit systems.
 */
void br_vlan_get_stats(const struct net_bridge_vlan *v,
		       struct br_vlan_stats *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		struct br_vlan_stats *cpu_stats;
		unsigned int start;

		cpu_stats = per_cpu_ptr(v->stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rxpackets = cpu_stats->rx_packets;
			rxbytes = cpu_stats->rx_bytes;
			txbytes = cpu_stats->tx_bytes;
			txpackets = cpu_stats->tx_packets;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rxpackets;
		stats->rx_bytes += rxbytes;
		stats->tx_bytes += txbytes;
		stats->tx_packets += txpackets;
	}
}