// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>

#include "br_private.h"
#include "br_private_tunnel.h"

static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);

static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
			      const void *ptr)
{
	const struct net_bridge_vlan *vle = ptr;
	u16 vid = *(u16 *)arg->key;

	return vle->vid != vid;
}

static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.max_size = VLAN_N_VID,
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};

static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}

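/* The pvid is read locklessly (see br_get_pvid()); the smp_wmb() in the
 * two helpers below is the matching write-side barrier, making sure the
 * vlan is fully set up before the new pvid value becomes visible.
 */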
static void __vlan_add_pvid(struct net_bridge_vlan_group *vg,
			    const struct net_bridge_vlan *v)
{
	if (vg->pvid == v->vid)
		return;

	smp_wmb();
	br_vlan_set_pvid_state(vg, v->state);
	vg->pvid = v->vid;
}

static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return;

	smp_wmb();
	vg->pvid = 0;
}

/* Update the BRIDGE_VLAN_INFO_PVID and BRIDGE_VLAN_INFO_UNTAGGED flags of @v.
 * If @commit is false, just return whether the BRIDGE_VLAN_INFO_PVID and
 * BRIDGE_VLAN_INFO_UNTAGGED bits of @flags would produce any change to @v.
 */
static bool __vlan_flags_update(struct net_bridge_vlan *v, u16 flags,
				bool commit)
{
	struct net_bridge_vlan_group *vg;
	bool change;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	/* check if anything would be changed on commit */
	change = !!(flags & BRIDGE_VLAN_INFO_PVID) == !!(vg->pvid != v->vid) ||
		 ((flags ^ v->flags) & BRIDGE_VLAN_INFO_UNTAGGED);

	if (!commit)
		goto out;

	if (flags & BRIDGE_VLAN_INFO_PVID)
		__vlan_add_pvid(vg, v);
	else
		__vlan_delete_pvid(vg, v->vid);

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	else
		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;

out:
	return change;
}

static bool __vlan_flags_would_change(struct net_bridge_vlan *v, u16 flags)
{
	return __vlan_flags_update(v, flags, false);
}

static void __vlan_flags_commit(struct net_bridge_vlan *v, u16 flags)
{
	__vlan_flags_update(v, flags, true);
}

static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  struct net_bridge_vlan *v, u16 flags,
			  struct netlink_ext_ack *extack)
{
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q add.
	 */
	err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
	if (err == -EOPNOTSUPP)
		return vlan_vid_add(dev, br->vlan_proto, v->vid);
	v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
	return err;
}

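/* Insert @v into the vlan group list, keeping the list sorted by vid.
 * The list is walked backwards since new entries usually carry the
 * largest vid seen so far.
 */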
static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan_group *vg;
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	headp = &vg->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid >= vent->vid)
			break;
	}
	list_add_rcu(&v->vlist, hpos);
}

static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}

static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  const struct net_bridge_vlan *v)
{
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q del.
	 */
	err = br_switchdev_port_vlan_del(dev, v->vid);
	if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
		vlan_vid_del(dev, br->vlan_proto, v->vid);
	return err == -EOPNOTSUPP ? 0 : err;
}

/* Returns the master vlan; if it didn't exist it is created. In all cases
 * a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *
br_vlan_get_master(struct net_bridge *br, u16 vid,
		   struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		bool changed;

		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0, &changed, extack))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
		refcount_set(&masterv->refcnt, 1);
		return masterv;
	}
	refcount_inc(&masterv->refcnt);

	return masterv;
}

static void br_master_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(!br_vlan_is_master(v));
	free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}

static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (refcount_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		br_multicast_toggle_one_vlan(masterv, false);
		br_multicast_ctx_deinit(&masterv->br_mcast_ctx);
		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
	}
}

static void nbp_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(br_vlan_is_master(v));
	/* if we had per-port stats configured then free them here */
	if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
		free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}

/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
 * 2. vlan is being added on a bridge (both master and brentry flags)
 * 3. vlan is being added on a port, but a global entry didn't exist which
 *    is being created right now (master flag set, brentry flag unset), the
 *    global entry is used for global per-vlan features, but not for filtering
 * 4. same as 3 but with both master and brentry flags set so the entry
 *    will be used for filtering in both the port and the bridge
 */
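/* Note: case 3 is reached through br_vlan_get_master(), which calls back
 * into br_vlan_add() when a port vlan is created before its global
 * context exists; case 4 is the BRIDGE_VLAN_INFO_MASTER port flag path,
 * where __vlan_add() itself creates the brentry first.
 */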
static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
		      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *masterv = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_device *dev;
	struct net_bridge *br;
	int err;

	if (br_vlan_is_master(v)) {
		br = v->br;
		dev = br->dev;
		vg = br_vlan_group(br);
	} else {
		p = v->port;
		br = p->br;
		dev = p->dev;
		vg = nbp_vlan_group(p);
	}

	if (p) {
		/* Add VLAN to the device filter if it is supported.
		 * This ensures tagged traffic enters the bridge when
		 * promiscuous mode is disabled by br_manage_promisc().
		 */
		err = __vlan_vid_add(dev, br, v, flags, extack);
		if (err)
			goto out;

		/* need to work on the master vlan too */
		if (flags & BRIDGE_VLAN_INFO_MASTER) {
			bool changed;

			err = br_vlan_add(br, v->vid,
					  flags | BRIDGE_VLAN_INFO_BRENTRY,
					  &changed, extack);
			if (err)
				goto out_filt;

			if (changed)
				br_vlan_notify(br, NULL, v->vid, 0,
					       RTM_NEWVLAN);
		}

		masterv = br_vlan_get_master(br, v->vid, extack);
		if (!masterv) {
			err = -ENOMEM;
			goto out_filt;
		}
		v->brvlan = masterv;
		if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
			v->stats =
			     netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
			if (!v->stats) {
				err = -ENOMEM;
				goto out_filt;
			}
			v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
		} else {
			v->stats = masterv->stats;
		}
		br_multicast_port_ctx_init(p, v, &v->port_mcast_ctx);
	} else {
		if (br_vlan_should_use(v)) {
			err = br_switchdev_port_vlan_add(dev, v->vid, flags,
							 extack);
			if (err && err != -EOPNOTSUPP)
				goto out;
		}
		br_multicast_ctx_init(br, v, &v->br_mcast_ctx);
		v->priv_flags |= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	}

	/* Add the dev mac and count the vlan only if it's usable */
	if (br_vlan_should_use(v)) {
		err = br_fdb_add_local(br, p, dev->dev_addr, v->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto out_filt;
		}
		vg->num_vlans++;
	}

	/* set the state before publishing */
	v->state = BR_STATE_FORWARDING;

	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
					    br_vlan_rht_params);
	if (err)
		goto out_fdb_insert;

	__vlan_add_list(v);
	__vlan_flags_commit(v, flags);
	br_multicast_toggle_one_vlan(v, true);

	if (p)
		nbp_vlan_set_vlan_dev_state(p, v->vid);
out:
	return err;

out_fdb_insert:
	if (br_vlan_should_use(v)) {
		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
		vg->num_vlans--;
	}

out_filt:
	if (p) {
		__vlan_vid_del(dev, br, v);
		if (masterv) {
			if (v->stats && masterv->stats != v->stats)
				free_percpu(v->stats);
			v->stats = NULL;

			br_vlan_put_master(masterv);
			v->brvlan = NULL;
		}
	} else {
		br_switchdev_port_vlan_del(dev, v->vid);
	}

	goto out;
}

static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v);
		if (err)
			goto out;
	} else {
		err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
		if (err && err != -EOPNOTSUPP)
			goto out;
		err = 0;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	if (masterv != v) {
		vlan_tunnel_info_del(vg, v);
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		nbp_vlan_set_vlan_dev_state(p, v->vid);
		br_multicast_toggle_one_vlan(v, false);
		br_multicast_port_ctx_deinit(&v->port_mcast_ctx);
		call_rcu(&v->rcu, nbp_vlan_rcu_free);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}

static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	vlan_tunnel_deinit(vg);
	kfree(vg);
}

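/* Delete all vlans from @vg, coalescing consecutive vids so each deleted
 * range is announced with a single RTM_DELVLAN notification.
 */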
static void __vlan_flush(const struct net_bridge *br,
			 const struct net_bridge_port *p,
			 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;
	u16 v_start = 0, v_end = 0;
	int err;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) {
		/* take care of disjoint ranges */
		if (!v_start) {
			v_start = vlan->vid;
		} else if (vlan->vid - v_end != 1) {
			/* found range end, notify and start next one */
			br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
			v_start = vlan->vid;
		}
		v_end = vlan->vid;

		err = __vlan_del(vlan);
		if (err) {
			br_err(br,
			       "port %u(%s) failed to delete vlan %d: %pe\n",
			       (unsigned int) p->port_no, p->dev->name,
			       vlan->vid, ERR_PTR(err));
		}
	}

	/* notify about the last/whole vlan range */
	if (v_start)
		br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
}

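/* Egress path: decide whether the frame leaves tagged or untagged for
 * the vlan it was classified into and account the per-vlan tx stats.
 */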
struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       const struct net_bridge_port *p,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats;
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id.  If the vlan id has untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* Vlan entry must be configured at this point.  The
	 * only exception is when the bridge is set in promisc mode and the
	 * packet is destined for the bridge device.  In this case
	 * pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	/* If the skb will be sent using forwarding offload, the assumption is
	 * that the switchdev will inject the packet into hardware together
	 * with the bridge VLAN, so that it can be forwarded according to that
	 * VLAN. The switchdev should deal with popping the VLAN header in
	 * hardware on each egress port as appropriate. So only strip the VLAN
	 * header if forwarding offload is not being used.
	 */
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED &&
	    !br_switchdev_frame_uses_tx_fwd_offload(skb))
		__vlan_hwaccel_clear_tag(skb);

	if (p && (p->flags & BR_VLAN_TUNNEL) &&
	    br_handle_egress_vlan_tunnel(skb, v)) {
		kfree_skb(skb);
		return NULL;
	}
out:
	return skb;
}

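/* Ingress classification: untag protocol-mismatched frames, assign
 * untagged/priority-tagged traffic to the pvid and validate the per-vlan
 * STP state before accepting the frame.
 */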
/* Called under RCU */
static bool __allowed_ingress(const struct net_bridge *br,
			      struct net_bridge_vlan_group *vg,
			      struct sk_buff *skb, u16 *vid,
			      u8 *state,
			      struct net_bridge_vlan **vlan)
{
	struct pcpu_sw_netstats *stats;
	struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == br->vlan_proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != br->vlan_proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port.  That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port.  Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, we know that skb->vlan_tci VID
			 * field was 0.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

		/* if snooping and stats are disabled we can avoid the lookup */
		if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) &&
		    !br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
			if (*state == BR_STATE_FORWARDING) {
				*state = br_vlan_get_pvid_state(vg);
				if (!br_vlan_state_allowed(*state, true))
					goto drop;
			}
			return true;
		}
	}
	v = br_vlan_find(vg, *vid);
	if (!v || !br_vlan_should_use(v))
		goto drop;

	if (*state == BR_STATE_FORWARDING) {
		*state = br_vlan_get_state(v);
		if (!br_vlan_state_allowed(*state, true))
			goto drop;
	}

	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->rx_bytes += skb->len;
		stats->rx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	*vlan = v;

	return true;

drop:
	kfree_skb(skb);
	return false;
}

bool br_allowed_ingress(const struct net_bridge *br,
			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
			u16 *vid, u8 *state,
			struct net_bridge_vlan **vlan)
{
	/* If VLAN filtering is disabled on the bridge, all packets are
	 * permitted.
	 */
	*vlan = NULL;
	if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
		return true;
	}

	return __allowed_ingress(br, vg, skb, vid, state, vlan);
}

/* Called under RCU. */
bool br_allowed_egress(struct net_bridge_vlan_group *vg,
		       const struct sk_buff *skb)
{
	const struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		return true;

	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	if (v && br_vlan_should_use(v) &&
	    br_vlan_state_allowed(br_vlan_get_state(v), false))
		return true;

	return false;
}

/* Called under RCU */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge *br = p->br;
	struct net_bridge_vlan *v;

	/* If filtering was disabled at input, let it pass. */
	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
		return true;

	vg = nbp_vlan_group_rcu(p);
	if (!vg || !vg->num_vlans)
		return false;

	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
		*vid = 0;

	if (!*vid) {
		*vid = br_get_pvid(vg);
		if (!*vid ||
		    !br_vlan_state_allowed(br_vlan_get_pvid_state(vg), true))
			return false;

		return true;
	}

	v = br_vlan_find(vg, *vid);
	if (v && br_vlan_state_allowed(br_vlan_get_state(v), true))
		return true;

	return false;
}

static int br_vlan_add_existing(struct net_bridge *br,
				struct net_bridge_vlan_group *vg,
				struct net_bridge_vlan *vlan,
				u16 flags, bool *changed,
				struct netlink_ext_ack *extack)
{
	bool would_change = __vlan_flags_would_change(vlan, flags);
	bool becomes_brentry = false;
	int err;

	if (!br_vlan_is_brentry(vlan)) {
		/* Trying to change flags of non-existent bridge vlan */
		if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
			return -EINVAL;

		becomes_brentry = true;
	}

	/* Master VLANs that aren't brentries weren't notified before,
	 * time to notify them now.
	 */
	if (becomes_brentry || would_change) {
		err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags,
						 extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	if (becomes_brentry) {
		/* It was only kept for port vlans, now make it real */
		err = br_fdb_add_local(br, NULL, br->dev->dev_addr, vlan->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto err_fdb_insert;
		}

		refcount_inc(&vlan->refcnt);
		vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans++;
		*changed = true;
		br_multicast_toggle_one_vlan(vlan, true);
	}

	__vlan_flags_commit(vlan, flags);
	if (would_change)
		*changed = true;

	return 0;

err_fdb_insert:
	br_switchdev_port_vlan_del(br->dev, vlan->vid);
	return err;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
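/* For example, the bridge device's own default pvid entry is created as:
 *
 *	err = br_vlan_add(br, br->default_pvid,
 *			  BRIDGE_VLAN_INFO_PVID |
 *			  BRIDGE_VLAN_INFO_UNTAGGED |
 *			  BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
 *
 * (see the NETDEV_REGISTER case in br_vlan_bridge_event() below).
 */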
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
		struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan)
		return br_vlan_add_existing(br, vg, vlan, flags, changed,
					    extack);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!vlan->stats) {
		kfree(vlan);
		return -ENOMEM;
	}
	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		refcount_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags, extack);
	if (ret) {
		free_percpu(vlan->stats);
		kfree(vlan);
	} else {
		*changed = true;
	}

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
	br_fdb_delete_by_port(br, NULL, vid, 0);

	vlan_tunnel_info_del(vg, v);

	return __vlan_del(v);
}

void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(br, NULL, vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}

struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (!vg)
		return NULL;

	return br_vlan_lookup(&vg->vlan_hash, vid);
}

/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
	if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
		return;

	spin_lock_bh(&br->lock);
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q)) {
		/* Bridge Group Address */
		br->group_addr[5] = 0x00;
	} else { /* vlan_enabled && ETH_P_8021AD */
		/* Provider Bridge Group Address */
		br->group_addr[5] = 0x08;
	}
	spin_unlock_bh(&br->lock);
}

/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q))
		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
	else /* vlan_enabled && ETH_P_8021AD */
		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
					      ~(1u << br->group_addr[5]);
}

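/* Toggle vlan filtering on the bridge. The option is flipped first and
 * rolled back if the switchdev attribute cannot be set (any error other
 * than -EOPNOTSUPP); disabling filtering also turns off multicast vlan
 * snooping.
 */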
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val,
			  struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
		return 0;

	br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);

	err = switchdev_port_attr_set(br->dev, &attr, extack);
	if (err && err != -EOPNOTSUPP) {
		br_opt_toggle(br, BROPT_VLAN_ENABLED, !val);
		return err;
	}

	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);
	if (!val && br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		br_info(br, "vlan filtering disabled, automatically disabling multicast vlan snooping\n");
		br_multicast_toggle_vlan_snooping(br, false, NULL);
	}

	return 0;
}

bool br_vlan_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_opt_get(br, BROPT_VLAN_ENABLED);
}
EXPORT_SYMBOL_GPL(br_vlan_enabled);

int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
{
	struct net_bridge *br = netdev_priv(dev);

	*p_proto = ntohs(br->vlan_proto);

	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_proto);

int __br_vlan_set_proto(struct net_bridge *br, __be16 proto,
			struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_protocol = ntohs(proto),
	};
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto = br->vlan_proto;

	if (br->vlan_proto == proto)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
	}

	return 0;

err_filt:
	attr.u.vlan_protocol = ntohs(oldproto);
	switchdev_port_attr_set(br->dev, &attr, NULL);

	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
	}

	return err;
}

int br_vlan_set_proto(struct net_bridge *br, unsigned long val,
		      struct netlink_ext_ack *extack)
{
	if (!eth_type_vlan(htons(val)))
		return -EPROTONOSUPPORT;

	return __br_vlan_set_proto(br, htons(val), extack);
}

int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
{
	switch (val) {
	case 0:
	case 1:
		br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *p;

	/* allow changing the option only if there are no port vlans configured */
	list_for_each_entry(p, &br->port_list, list) {
		struct net_bridge_vlan_group *vg = nbp_vlan_group(p);

		if (vg->num_vlans)
			return -EBUSY;
	}

	switch (val) {
	case 0:
	case 1:
		br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

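/* True if @vid is the untagged pvid of @vg, i.e. the entry still looks
 * exactly like one installed by the default_pvid code and thus doesn't
 * conflict with user configuration.
 */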
static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	struct net_bridge_vlan *v;

	if (vid != vg->pvid)
		return false;

	v = br_vlan_lookup(&vg->vlan_hash, vid);
	if (v && br_vlan_should_use(v) &&
	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return true;

	return false;
}

static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid)) {
		if (!br_vlan_delete(br, pvid))
			br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
	}

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid) &&
		    !nbp_vlan_delete(p, pvid))
			br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
	}

	br->default_pvid = 0;
}

int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
			       struct netlink_ext_ack *extack)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	unsigned long *changed;
	bool vlchange;
	u16 old_pvid;
	int err = 0;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY,
				  &vlchange, extack);
		if (err)
			goto out;

		if (br_vlan_delete(br, old_pvid))
			br_vlan_notify(br, NULL, old_pvid, 0, RTM_DELVLAN);
		br_vlan_notify(br, NULL, pvid, 0, RTM_NEWVLAN);
		__set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &vlchange, extack);
		if (err)
			goto err_port;
		if (nbp_vlan_delete(p, old_pvid))
			br_vlan_notify(br, p, old_pvid, 0, RTM_DELVLAN);
		br_vlan_notify(p->br, p, pvid, 0, RTM_NEWVLAN);
		__set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	bitmap_free(changed);
	return err;

err_port:
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid) {
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED,
				     &vlchange, NULL);
			br_vlan_notify(p->br, p, old_pvid, 0, RTM_NEWVLAN);
		}
		nbp_vlan_delete(p, pvid);
		br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
	}

	if (test_bit(0, changed)) {
		if (old_pvid) {
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY,
				    &vlchange, NULL);
			br_vlan_notify(br, NULL, old_pvid, 0, RTM_NEWVLAN);
		}
		br_vlan_delete(br, pvid);
		br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
	}
	goto out;
}

int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val,
			     struct netlink_ext_ack *extack)
{
	u16 pvid = val;
	int err = 0;

	if (val >= VLAN_VID_MASK)
		return -EINVAL;

	if (pvid == br->default_pvid)
		goto out;

	/* Only allow default pvid change when filtering is disabled */
	if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
		pr_info_once("Please disable vlan filtering to change default_pvid\n");
		err = -EPERM;
		goto out;
	}
	err = __br_vlan_set_default_pvid(br, pvid, extack);
out:
	return err;
}

int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	rcu_assign_pointer(br->vlgrp, vg);

out:
	return ret;

err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}

int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = switchdev_port_attr_set(p->dev, &attr, extack);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		bool changed;

		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &changed, extack);
		if (ret)
			goto err_vlan_add;
		br_vlan_notify(p->br, p, p->br->default_pvid, 0, RTM_NEWVLAN);
	}
out:
	return ret;

err_vlan_add:
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
err_vlan_enabled:
	kfree(vg);

	goto out;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
		 bool *changed, struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		bool would_change = __vlan_flags_would_change(vlan, flags);

		if (would_change) {
			/* Pass the flags to the hardware bridge */
			ret = br_switchdev_port_vlan_add(port->dev, vid,
							 flags, extack);
			if (ret && ret != -EOPNOTSUPP)
				return ret;
		}

		__vlan_flags_commit(vlan, flags);
		*changed = would_change;

		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags, extack);
	if (ret)
		kfree(vlan);
	else
		*changed = true;

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(nbp_vlan_group(port), vid);
	if (!v)
		return -ENOENT;
	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
	br_fdb_delete_by_port(port->br, port, vid, 0);

	return __vlan_del(v);
}

void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(port->br, port, vg);
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}

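/* Sum the per-cpu counters of @v into @stats; the u64_stats syncp retry
 * loop keeps the 64-bit reads consistent on 32-bit hosts.
 */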
void br_vlan_get_stats(const struct net_bridge_vlan *v,
		       struct pcpu_sw_netstats *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		struct pcpu_sw_netstats *cpu_stats;
		unsigned int start;

		cpu_stats = per_cpu_ptr(v->stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rxpackets = cpu_stats->rx_packets;
			rxbytes = cpu_stats->rx_bytes;
			txbytes = cpu_stats->tx_bytes;
			txpackets = cpu_stats->tx_packets;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rxpackets;
		stats->rx_bytes += rxbytes;
		stats->tx_bytes += txbytes;
		stats->tx_packets += txpackets;
	}
}

int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;

	ASSERT_RTNL();
	p = br_port_get_check_rtnl(dev);
	if (p)
		vg = nbp_vlan_group(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group(netdev_priv(dev));
	else
		return -EINVAL;

	*p_pvid = br_get_pvid(vg);
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid);

int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;

	p = br_port_get_check_rcu(dev);
	if (p)
		vg = nbp_vlan_group_rcu(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group_rcu(netdev_priv(dev));
	else
		return -EINVAL;

	*p_pvid = br_get_pvid(vg);
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);

void br_vlan_fill_forward_path_pvid(struct net_bridge *br,
				    struct net_device_path_ctx *ctx,
				    struct net_device_path *path)
{
	struct net_bridge_vlan_group *vg;
	int idx = ctx->num_vlans - 1;
	u16 vid;

	path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;

	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
		return;

	vg = br_vlan_group(br);

	if (idx >= 0 &&
	    ctx->vlan[idx].proto == br->vlan_proto) {
		vid = ctx->vlan[idx].id;
	} else {
		path->bridge.vlan_mode = DEV_PATH_BR_VLAN_TAG;
		vid = br_get_pvid(vg);
	}

	path->bridge.vlan_id = vid;
	path->bridge.vlan_proto = br->vlan_proto;
}

int br_vlan_fill_forward_path_mode(struct net_bridge *br,
				   struct net_bridge_port *dst,
				   struct net_device_path *path)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
		return 0;

	vg = nbp_vlan_group_rcu(dst);
	v = br_vlan_find(vg, path->bridge.vlan_id);
	if (!v || !br_vlan_should_use(v))
		return -EINVAL;

	if (!(v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return 0;

	if (path->bridge.vlan_mode == DEV_PATH_BR_VLAN_TAG)
		path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;
	else if (v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
		path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG_HW;
	else
		path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG;

	return 0;
}

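/* Look up vlan info on a bridge or port device under RTNL; the _rcu
 * variant below serves callers that only hold rcu_read_lock().
 */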
int br_vlan_get_info(const struct net_device *dev, u16 vid,
		     struct bridge_vlan_info *p_vinfo)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge_port *p;

	ASSERT_RTNL();
	p = br_port_get_check_rtnl(dev);
	if (p)
		vg = nbp_vlan_group(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group(netdev_priv(dev));
	else
		return -EINVAL;

	v = br_vlan_find(vg, vid);
	if (!v)
		return -ENOENT;

	p_vinfo->vid = vid;
	p_vinfo->flags = v->flags;
	if (vid == br_get_pvid(vg))
		p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_info);

int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
			 struct bridge_vlan_info *p_vinfo)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge_port *p;

	p = br_port_get_check_rcu(dev);
	if (p)
		vg = nbp_vlan_group_rcu(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group_rcu(netdev_priv(dev));
	else
		return -EINVAL;

	v = br_vlan_find(vg, vid);
	if (!v)
		return -ENOENT;

	p_vinfo->vid = vid;
	p_vinfo->flags = v->flags;
	if (vid == br_get_pvid(vg))
		p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_info_rcu);

static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
		!!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
}

static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
			       __always_unused struct netdev_nested_priv *priv)
{
	return br_vlan_is_bind_vlan_dev(dev);
}

static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
{
	int found;

	rcu_read_lock();
	found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
					      NULL);
	rcu_read_unlock();

	return !!found;
}

struct br_vlan_bind_walk_data {
	u16 vid;
	struct net_device *result;
};

static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
					  struct netdev_nested_priv *priv)
{
	struct br_vlan_bind_walk_data *data = priv->data;
	int found = 0;

	if (br_vlan_is_bind_vlan_dev(dev) &&
	    vlan_dev_priv(dev)->vlan_id == data->vid) {
		data->result = dev;
		found = 1;
	}

	return found;
}

static struct net_device *
br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
{
	struct br_vlan_bind_walk_data data = {
		.vid = vid,
	};
	struct netdev_nested_priv priv = {
		.data = (void *)&data,
	};

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
				      &priv);
	rcu_read_unlock();

	return data.result;
}

static bool br_vlan_is_dev_up(const struct net_device *dev)
{
	return  !!(dev->flags & IFF_UP) && netif_oper_up(dev);
}

static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
				       struct net_device *vlan_dev)
{
	u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	bool has_carrier = false;

	if (!netif_carrier_ok(br->dev)) {
		netif_carrier_off(vlan_dev);
		return;
	}

	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
			has_carrier = true;
			break;
		}
	}

	if (has_carrier)
		netif_carrier_on(vlan_dev);
	else
		netif_carrier_off(vlan_dev);
}

static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
{
	struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
	struct net_bridge_vlan *vlan;
	struct net_device *vlan_dev;

	list_for_each_entry(vlan, &vg->vlan_list, vlist) {
		vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
							   vlan->vid);
		if (vlan_dev) {
			if (br_vlan_is_dev_up(p->dev)) {
				if (netif_carrier_ok(p->br->dev))
					netif_carrier_on(vlan_dev);
			} else {
				br_vlan_set_vlan_dev_state(p->br, vlan_dev);
			}
		}
	}
}

static void br_vlan_upper_change(struct net_device *dev,
				 struct net_device *upper_dev,
				 bool linking)
{
	struct net_bridge *br = netdev_priv(dev);

	if (!br_vlan_is_bind_vlan_dev(upper_dev))
		return;

	if (linking) {
		br_vlan_set_vlan_dev_state(br, upper_dev);
		br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
	} else {
		br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
			      br_vlan_has_upper_bind_vlan_dev(dev));
	}
}

struct br_vlan_link_state_walk_data {
	struct net_bridge *br;
};

static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
					struct netdev_nested_priv *priv)
{
	struct br_vlan_link_state_walk_data *data = priv->data;

	if (br_vlan_is_bind_vlan_dev(vlan_dev))
		br_vlan_set_vlan_dev_state(data->br, vlan_dev);

	return 0;
}

static void br_vlan_link_state_change(struct net_device *dev,
				      struct net_bridge *br)
{
	struct br_vlan_link_state_walk_data data = {
		.br = br
	};
	struct netdev_nested_priv priv = {
		.data = (void *)&data,
	};

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
				      &priv);
	rcu_read_unlock();
}

/* Must be protected by RTNL. */
static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
{
	struct net_device *vlan_dev;

	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
		return;

	vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
	if (vlan_dev)
		br_vlan_set_vlan_dev_state(p->br, vlan_dev);
}

/* Must be protected by RTNL. */
int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_bridge *br = netdev_priv(dev);
	int vlcmd = 0, ret = 0;
	bool changed = false;

	switch (event) {
	case NETDEV_REGISTER:
		ret = br_vlan_add(br, br->default_pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
		vlcmd = RTM_NEWVLAN;
		break;
	case NETDEV_UNREGISTER:
		changed = !br_vlan_delete(br, br->default_pvid);
		vlcmd = RTM_DELVLAN;
		break;
	case NETDEV_CHANGEUPPER:
		info = ptr;
		br_vlan_upper_change(dev, info->upper_dev, info->linking);
		break;

	case NETDEV_CHANGE:
	case NETDEV_UP:
		if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
			break;
		br_vlan_link_state_change(dev, br);
		break;
	}

	if (changed)
		br_vlan_notify(br, NULL, br->default_pvid, 0, vlcmd);

	return ret;
}

/* Must be protected by RTNL. */
void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
{
	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
		return;

	switch (event) {
	case NETDEV_CHANGE:
	case NETDEV_DOWN:
	case NETDEV_UP:
		br_vlan_set_all_vlan_dev_state(p);
		break;
	}
}

static bool br_vlan_stats_fill(struct sk_buff *skb,
			       const struct net_bridge_vlan *v)
{
	struct pcpu_sw_netstats stats;
	struct nlattr *nest;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_STATS);
	if (!nest)
		return false;

	br_vlan_get_stats(v, &stats);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES, stats.rx_bytes,
			      BRIDGE_VLANDB_STATS_PAD) ||
	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_PACKETS,
			      stats.rx_packets, BRIDGE_VLANDB_STATS_PAD) ||
	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES, stats.tx_bytes,
			      BRIDGE_VLANDB_STATS_PAD) ||
	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_PACKETS,
			      stats.tx_packets, BRIDGE_VLANDB_STATS_PAD))
		goto out_err;

	nla_nest_end(skb, nest);

	return true;

out_err:
	nla_nest_cancel(skb, nest);
	return false;
}

/* v_opts is used to dump the options which must be equal in the whole range */
static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
			      const struct net_bridge_vlan *v_opts,
			      u16 flags,
			      bool dump_stats)
{
	struct bridge_vlan_info info;
	struct nlattr *nest;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY);
	if (!nest)
		return false;

	memset(&info, 0, sizeof(info));
	info.vid = vid;
	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		info.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	if (flags & BRIDGE_VLAN_INFO_PVID)
		info.flags |= BRIDGE_VLAN_INFO_PVID;

	if (nla_put(skb, BRIDGE_VLANDB_ENTRY_INFO, sizeof(info), &info))
		goto out_err;

	if (vid_range && vid < vid_range &&
	    !(flags & BRIDGE_VLAN_INFO_PVID) &&
	    nla_put_u16(skb, BRIDGE_VLANDB_ENTRY_RANGE, vid_range))
		goto out_err;

	if (v_opts) {
		if (!br_vlan_opts_fill(skb, v_opts))
			goto out_err;

		if (dump_stats && !br_vlan_stats_fill(skb, v_opts))
			goto out_err;
	}

	nla_nest_end(skb, nest);

	return true;

out_err:
	nla_nest_cancel(skb, nest);
	return false;
}

static size_t rtnl_vlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
		+ nla_total_size(0) /* BRIDGE_VLANDB_ENTRY */
		+ nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_ENTRY_RANGE */
		+ nla_total_size(sizeof(struct bridge_vlan_info)) /* BRIDGE_VLANDB_ENTRY_INFO */
		+ br_vlan_opts_nl_size(); /* bridge vlan options */
}

void br_vlan_notify(const struct net_bridge *br,
		    const struct net_bridge_port *p,
		    u16 vid, u16 vid_range,
		    int cmd)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v = NULL;
	struct br_vlan_msg *bvm;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	struct net *net;
	u16 flags = 0;
	int ifindex;

	/* right now notifications are done only with rtnl held */
	ASSERT_RTNL();

	if (p) {
		ifindex = p->dev->ifindex;
		vg = nbp_vlan_group(p);
		net = dev_net(p->dev);
	} else {
		ifindex = br->dev->ifindex;
		vg = br_vlan_group(br);
		net = dev_net(br->dev);
	}

	skb = nlmsg_new(rtnl_vlan_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		goto out_err;

	err = -EMSGSIZE;
	nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*bvm), 0);
	if (!nlh)
		goto out_err;
	bvm = nlmsg_data(nlh);
	memset(bvm, 0, sizeof(*bvm));
	bvm->family = AF_BRIDGE;
	bvm->ifindex = ifindex;

	switch (cmd) {
	case RTM_NEWVLAN:
		/* need to find the vlan due to flags/options */
		v = br_vlan_find(vg, vid);
		if (!v || !br_vlan_should_use(v))
			goto out_kfree;

		flags = v->flags;
		if (br_get_pvid(vg) == v->vid)
			flags |= BRIDGE_VLAN_INFO_PVID;
		break;
	case RTM_DELVLAN:
		break;
	default:
		goto out_kfree;
	}

	if (!br_vlan_fill_vids(skb, vid, vid_range, v, flags, false))
		goto out_err;

	nlmsg_end(skb, nlh);
	rtnl_notify(skb, net, 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
	return;

out_err:
	rtnl_set_sk_err(net, RTNLGRP_BRVLAN, err);
out_kfree:
	kfree_skb(skb);
}

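/* Consecutive vids with equal flags and per-vlan options are compressed
 * into [vid, vid_range] spans for dumps and notifications, e.g. vlans 10
 * and 11 with matching flags are reported as the range 10-11.
 */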
/* check if v_curr can enter a range ending in range_end */
bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
			     const struct net_bridge_vlan *range_end)
{
	return v_curr->vid - range_end->vid == 1 &&
	       range_end->flags == v_curr->flags &&
	       br_vlan_opts_eq_range(v_curr, range_end);
}

1917 1918
static int br_vlan_dump_dev(const struct net_device *dev,
			    struct sk_buff *skb,
1919 1920
			    struct netlink_callback *cb,
			    u32 dump_flags)
1921
{
1922
	struct net_bridge_vlan *v, *range_start = NULL, *range_end = NULL;
1923
	bool dump_global = !!(dump_flags & BRIDGE_VLANDB_DUMPF_GLOBAL);
1924
	bool dump_stats = !!(dump_flags & BRIDGE_VLANDB_DUMPF_STATS);
1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941
	struct net_bridge_vlan_group *vg;
	int idx = 0, s_idx = cb->args[1];
	struct nlmsghdr *nlh = NULL;
	struct net_bridge_port *p;
	struct br_vlan_msg *bvm;
	struct net_bridge *br;
	int err = 0;
	u16 pvid;

	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
		return -EINVAL;

	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
		p = NULL;
	} else {
1942 1943 1944 1945
		/* global options are dumped only for bridge devices */
		if (dump_global)
			return 0;

1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965
		p = br_port_get_rcu(dev);
		if (WARN_ON(!p))
			return -EINVAL;
		vg = nbp_vlan_group_rcu(p);
		br = p->br;
	}

	if (!vg)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RTM_NEWVLAN, sizeof(*bvm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;
	bvm = nlmsg_data(nlh);
	memset(bvm, 0, sizeof(*bvm));
	bvm->family = PF_BRIDGE;
	bvm->ifindex = dev->ifindex;
	pvid = br_get_pvid(vg);

1966
	/* idx must stay at range's beginning until it is filled in */
1967
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
1968
		if (!dump_global && !br_vlan_should_use(v))
1969
			continue;
1970 1971 1972
		if (idx < s_idx) {
			idx++;
			continue;
1973
		}
1974 1975 1976 1977 1978 1979 1980

		if (!range_start) {
			range_start = v;
			range_end = v;
			continue;
		}

1981 1982
		if (dump_global) {
			if (br_vlan_global_opts_can_enter_range(v, range_end))
1983
				goto update_end;
1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995
			if (!br_vlan_global_opts_fill(skb, range_start->vid,
						      range_end->vid,
						      range_start)) {
				err = -EMSGSIZE;
				break;
			}
			/* advance number of filled vlans */
			idx += range_end->vid - range_start->vid + 1;

			range_start = v;
		} else if (dump_stats || v->vid == pvid ||
			   !br_vlan_can_enter_range(v, range_end)) {
			u16 vlan_flags = br_vlan_flags(range_start, pvid);

			if (!br_vlan_fill_vids(skb, range_start->vid,
					       range_end->vid, range_start,
					       vlan_flags, dump_stats)) {
				err = -EMSGSIZE;
				break;
			}
			/* advance number of filled vlans */
			idx += range_end->vid - range_start->vid + 1;

			range_start = v;
		}
update_end:
		range_end = v;
	}

	/* err will be 0 and range_start will be set in 3 cases here:
	 * - first vlan (range_start == range_end)
	 * - last vlan (range_start == range_end, not in range)
	 * - last vlan range (range_start != range_end, in range)
	 */
	if (!err && range_start) {
		if (dump_global &&
		    !br_vlan_global_opts_fill(skb, range_start->vid,
					      range_end->vid, range_start))
			err = -EMSGSIZE;
		else if (!dump_global &&
			 !br_vlan_fill_vids(skb, range_start->vid,
					    range_end->vid, range_start,
					    br_vlan_flags(range_start, pvid),
					    dump_stats))
			err = -EMSGSIZE;
	}

	cb->args[1] = err ? idx : 0;

	nlmsg_end(skb, nlh);

	return err;
}

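/* attribute policy for RTM_GETVLAN dump requests */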
static const struct nla_policy br_vlan_db_dump_pol[BRIDGE_VLANDB_DUMP_MAX + 1] = {
	[BRIDGE_VLANDB_DUMP_FLAGS] = { .type = NLA_U32 },
};

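/* RTM_GETVLAN dump handler: dump a single device if ifindex is set,
 * otherwise walk all devices in the netns.
 */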
static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *dtb[BRIDGE_VLANDB_DUMP_MAX + 1];
	int idx = 0, err = 0, s_idx = cb->args[0];
	struct net *net = sock_net(skb->sk);
	struct br_vlan_msg *bvm;
	struct net_device *dev;
	u32 dump_flags = 0;

	err = nlmsg_parse(cb->nlh, sizeof(*bvm), dtb, BRIDGE_VLANDB_DUMP_MAX,
			  br_vlan_db_dump_pol, cb->extack);
	if (err < 0)
		return err;

	bvm = nlmsg_data(cb->nlh);
	if (dtb[BRIDGE_VLANDB_DUMP_FLAGS])
		dump_flags = nla_get_u32(dtb[BRIDGE_VLANDB_DUMP_FLAGS]);

	rcu_read_lock();
	if (bvm->ifindex) {
		dev = dev_get_by_index_rcu(net, bvm->ifindex);
		if (!dev) {
			err = -ENODEV;
			goto out_err;
		}
		err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
		/* if the dump completed without an error we return 0 here */
		if (err != -EMSGSIZE)
			goto out_err;
	} else {
		for_each_netdev_rcu(net, dev) {
			if (idx < s_idx)
				goto skip;

			err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
			if (err == -EMSGSIZE)
				break;
skip:
			idx++;
		}
	}
	cb->args[0] = idx;
	rcu_read_unlock();

	return skb->len;

out_err:
	rcu_read_unlock();

	return err;
}

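/* attribute policy for BRIDGE_VLANDB_ENTRY attributes */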
static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] = {
	[BRIDGE_VLANDB_ENTRY_INFO]	=
		NLA_POLICY_EXACT_LEN(sizeof(struct bridge_vlan_info)),
	[BRIDGE_VLANDB_ENTRY_RANGE]	= { .type = NLA_U16 },
	[BRIDGE_VLANDB_ENTRY_STATE]	= { .type = NLA_U8 },
	[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = { .type = NLA_NESTED },
	[BRIDGE_VLANDB_ENTRY_MCAST_ROUTER]	= { .type = NLA_U8 },
};

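/* Process a single BRIDGE_VLANDB_ENTRY attribute: add or delete the vlan
 * (or vlan range) and, for RTM_NEWVLAN, apply any per-vlan options.
 */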
static int br_vlan_rtm_process_one(struct net_device *dev,
				   const struct nlattr *attr,
				   int cmd, struct netlink_ext_ack *extack)
{
	struct bridge_vlan_info *vinfo, vrange_end, *vinfo_last = NULL;
	struct nlattr *tb[BRIDGE_VLANDB_ENTRY_MAX + 1];
	bool changed = false, skip_processing = false;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0, cmdmap = 0;
	struct net_bridge *br;

	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (WARN_ON(!p))
			return -ENODEV;
		br = p->br;
		vg = nbp_vlan_group(p);
	}

	if (WARN_ON(!vg))
		return -ENODEV;

	err = nla_parse_nested(tb, BRIDGE_VLANDB_ENTRY_MAX, attr,
			       br_vlan_db_policy, extack);
	if (err)
		return err;

	if (!tb[BRIDGE_VLANDB_ENTRY_INFO]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry info");
		return -EINVAL;
	}
	memset(&vrange_end, 0, sizeof(vrange_end));

	vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
	if (vinfo->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
			    BRIDGE_VLAN_INFO_RANGE_END)) {
		NL_SET_ERR_MSG_MOD(extack, "Old-style vlan ranges are not allowed when using RTM vlan calls");
		return -EINVAL;
	}
	if (!br_vlan_valid_id(vinfo->vid, extack))
		return -EINVAL;

	if (tb[BRIDGE_VLANDB_ENTRY_RANGE]) {
		vrange_end.vid = nla_get_u16(tb[BRIDGE_VLANDB_ENTRY_RANGE]);
		/* validate user-provided flags without RANGE_BEGIN */
		vrange_end.flags = BRIDGE_VLAN_INFO_RANGE_END | vinfo->flags;
		vinfo->flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;

		/* vinfo_last is the range start, vinfo the range end */
		vinfo_last = vinfo;
		vinfo = &vrange_end;

		if (!br_vlan_valid_id(vinfo->vid, extack) ||
		    !br_vlan_valid_range(vinfo, vinfo_last, extack))
			return -EINVAL;
	}

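	/* map the RTM vlan command onto the command expected by
	 * br_process_vlan_info()
	 */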
	switch (cmd) {
	case RTM_NEWVLAN:
		cmdmap = RTM_SETLINK;
		skip_processing = !!(vinfo->flags & BRIDGE_VLAN_INFO_ONLY_OPTS);
		break;
	case RTM_DELVLAN:
		cmdmap = RTM_DELLINK;
		break;
	}

	if (!skip_processing) {
		struct bridge_vlan_info *tmp_last = vinfo_last;

		/* br_process_vlan_info may overwrite vinfo_last */
		err = br_process_vlan_info(br, p, cmdmap, vinfo, &tmp_last,
					   &changed, extack);

		/* notify first if anything changed */
		if (changed)
			br_ifinfo_notify(cmdmap, br, p);

		if (err)
			return err;
	}

	/* deal with options */
	if (cmd == RTM_NEWVLAN) {
		struct net_bridge_vlan *range_start, *range_end;

		if (vinfo_last) {
			range_start = br_vlan_find(vg, vinfo_last->vid);
			range_end = br_vlan_find(vg, vinfo->vid);
		} else {
			range_start = br_vlan_find(vg, vinfo->vid);
			range_end = range_start;
		}

		err = br_vlan_process_options(br, p, range_start, range_end,
					      tb, extack);
	}

	return err;
}

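/* RTM_NEWVLAN/RTM_DELVLAN handler: resolve the target device and process
 * each vlan entry or global options attribute in the message.
 */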
static int br_vlan_rtm_process(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct br_vlan_msg *bvm;
	struct net_device *dev;
	struct nlattr *attr;
	int err, vlans = 0;
	int rem;

	/* this should validate the header and check for remaining bytes */
	err = nlmsg_parse(nlh, sizeof(*bvm), NULL, BRIDGE_VLANDB_MAX, NULL,
			  extack);
	if (err < 0)
		return err;

	bvm = nlmsg_data(nlh);
	dev = __dev_get_by_index(net, bvm->ifindex);
	if (!dev)
		return -ENODEV;

	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "The device is not a valid bridge or bridge port");
		return -EINVAL;
	}

	nlmsg_for_each_attr(attr, nlh, sizeof(*bvm), rem) {
		switch (nla_type(attr)) {
		case BRIDGE_VLANDB_ENTRY:
			err = br_vlan_rtm_process_one(dev, attr,
						      nlh->nlmsg_type,
						      extack);
			break;
		case BRIDGE_VLANDB_GLOBAL_OPTIONS:
			err = br_vlan_rtm_process_global_options(dev, attr,
								 nlh->nlmsg_type,
								 extack);
			break;
		default:
			continue;
		}

		vlans++;
		if (err)
			break;
	}
	if (!vlans) {
		NL_SET_ERR_MSG_MOD(extack, "No vlans found to process");
		err = -EINVAL;
	}

	return err;
}

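/* register the RTM_GETVLAN/RTM_NEWVLAN/RTM_DELVLAN handlers with rtnetlink */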
void br_vlan_rtnl_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL,
			     br_vlan_rtm_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN,
			     br_vlan_rtm_process, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELVLAN,
			     br_vlan_rtm_process, NULL, 0);
}

void br_vlan_rtnl_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETVLAN);
	rtnl_unregister(PF_BRIDGE, RTM_NEWVLAN);
	rtnl_unregister(PF_BRIDGE, RTM_DELVLAN);
}