// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"
#include "br_private_mcast_eht.h"

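/* Two rhashtables back the snooping state: br->mdb_hash_tbl maps a
 * struct br_ip key (protocol, destination, vid and, for S,G entries,
 * source) to its MDB entry, while br->sg_port_tbl maps an {addr, port}
 * pair to the per-port S,G group. Both shrink automatically.
 */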
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};

static const struct rhashtable_params br_sg_port_rht_params = {
	.head_offset = offsetof(struct net_bridge_port_group, rhnode),
	.key_offset = offsetof(struct net_bridge_port_group, key),
	.key_len = sizeof(struct net_bridge_port_group_sg_key),
	.automatic_shrinking = true,
};

static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query);
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);
static void br_multicast_port_group_rexmit(struct timer_list *t);

static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted);
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked);
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg);
static void __br_multicast_stop(struct net_bridge_mcast *brmctx);

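/* Look up the S,G,port entry for a given {source, group, vid, port} key;
 * the caller must hold br->multicast_lock (asserted below).
 */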
static struct net_bridge_port_group *
br_sg_port_find(struct net_bridge *br,
		struct net_bridge_port_group_sg_key *sg_p)
{
	lockdep_assert_held_once(&br->multicast_lock);

	return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
				      br_sg_port_rht_params);
}

static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}

struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
					   struct br_ip *dst)
{
	struct net_bridge_mdb_entry *ent;

	lockdep_assert_held_once(&br->multicast_lock);

	rcu_read_lock();
	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
	rcu_read_unlock();

	return ent;
}

static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
						   __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
						   const struct in6_addr *dst,
						   __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}
#endif

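/* Find the MDB entry an ingress skb should be forwarded by. With
 * IGMPv3/MLDv2 an S,G lookup is tried first, falling back to the *,G
 * entry; non-IP traffic is matched on the destination MAC only. Runs
 * under RCU from the forwarding fast path.
 */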
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge *br = brmctx->br;
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.dst.ip4 = ip_hdr(skb)->daddr;
		if (brmctx->multicast_igmp_version == 3) {
			struct net_bridge_mdb_entry *mdb;

			ip.src.ip4 = ip_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			ip.src.ip4 = 0;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.dst.ip6 = ipv6_hdr(skb)->daddr;
		if (brmctx->multicast_mld_version == 2) {
			struct net_bridge_mdb_entry *mdb;

			ip.src.ip6 = ipv6_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
		}
		break;
#endif
	default:
		ip.proto = 0;
		ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
	}

	return br_mdb_ip_get_rcu(br, &ip);
}

/* IMPORTANT: this function must be used only when the contexts cannot be
 * passed down (e.g. timer) and must be used for read-only purposes because
 * the vlan snooping option can change, so it can return any context
 * (non-vlan or vlan). Its initial intended purpose is to read timer values
 * from the *current* context based on the option. At worst that could lead
 * to inconsistent timers when the contexts are changed, i.e. src timer
 * which needs to re-arm with a specific delay taken from the old context.
 */
static struct net_bridge_mcast_port *
br_multicast_pg_to_port_ctx(const struct net_bridge_port_group *pg)
{
	struct net_bridge_mcast_port *pmctx = &pg->key.port->multicast_ctx;
	struct net_bridge_vlan *vlan;

	lockdep_assert_held_once(&pg->key.port->br->multicast_lock);

	/* if vlan snooping is disabled use the port's multicast context */
	if (!pg->key.addr.vid ||
	    !br_opt_get(pg->key.port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		goto out;

	/* locking is tricky here, due to different rules for multicast and
	 * vlans we need to take rcu to find the vlan and make sure it has
	 * the BR_VLFLAG_MCAST_ENABLED flag set, it can only change under
	 * multicast_lock which must be already held here, so the vlan's pmctx
	 * can safely be used on return
	 */
	rcu_read_lock();
	vlan = br_vlan_find(nbp_vlan_group(pg->key.port), pg->key.addr.vid);
	if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
		pmctx = &vlan->port_mcast_ctx;
	else
		pmctx = NULL;
	rcu_read_unlock();
out:
	return pmctx;
}

/* when snooping we need to check if the contexts should be used
 * in the following order:
 * - if pmctx is non-NULL (port), check if it should be used
 * - if pmctx is NULL (bridge), check if brmctx should be used
 */
static bool
br_multicast_ctx_should_use(const struct net_bridge_mcast *brmctx,
			    const struct net_bridge_mcast_port *pmctx)
{
	if (!netif_running(brmctx->br->dev))
		return false;

	if (pmctx)
		return !br_multicast_port_ctx_state_disabled(pmctx);
	else
		return !br_multicast_ctx_vlan_disabled(brmctx);
}

static bool br_port_group_equal(struct net_bridge_port_group *p,
				struct net_bridge_port *port,
				const unsigned char *src)
{
	if (p->key.port != port)
		return false;

	if (!(port->flags & BR_MULTICAST_TO_UNICAST))
		return true;

	return ether_addr_equal(src, p->eth_addr);
}

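/* Install a kernel-managed S,G entry on pg's port so that traffic for a
 * *,G EXCLUDE group is replicated to it; the new entry is tagged
 * MDB_PG_FLAGS_STAR_EXCL so it can be removed automatically when the
 * group reverts to INCLUDE mode or is deleted.
 */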
static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *src_pg;
	struct net_bridge_mcast *brmctx;

	memset(&sg_key, 0, sizeof(sg_key));
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	if (br_sg_port_find(brmctx->br, &sg_key))
		return;

	src_pg = __br_multicast_add_group(brmctx, pmctx,
					  sg_ip, pg->eth_addr,
					  MCAST_INCLUDE, false, false);
	if (IS_ERR_OR_NULL(src_pg) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
}

static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *src_pg;

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	src_pg = br_sg_port_find(br, &sg_key);
	if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	br_multicast_find_del_pg(br, src_pg);
}

/* When a port group transitions to (or is added as) EXCLUDE we need to add it
 * to all other ports' S,G entries which are not blocked by the current group
 * for proper replication, the assumption is that any S,G blocked entries
 * are already added so the S,G,port lookup should skip them.
 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
 * deleted we need to remove it from all ports' S,G entries where it was
 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
 */
void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
				     u8 filter_mode)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *pg_lst;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
		return;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (!mp)
		return;
	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;

	for (pg_lst = mlock_dereference(mp->ports, br);
	     pg_lst;
	     pg_lst = mlock_dereference(pg_lst->next, br)) {
		struct net_bridge_group_src *src_ent;

		if (pg_lst == pg)
			continue;
		hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			switch (filter_mode) {
			case MCAST_INCLUDE:
				__fwd_del_star_excl(pg, &sg_ip);
				break;
			case MCAST_EXCLUDE:
				__fwd_add_star_excl(pmctx, pg, &sg_ip);
				break;
			}
		}
	}
}

/* called when adding a new S,G with host_joined == false by default */
static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_mdb_entry *sg_mp;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;
	if (!star_mp->host_joined)
		return;

	sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
	if (!sg_mp)
		return;
	sg_mp->host_joined = true;
}

/* set the host_joined state of all of *,G's S,G entries */
static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
{
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mdb_entry *sg_mp;
	struct net_bridge_port_group *pg;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = star_mp->addr;
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_group_src *src_ent;

		hlist_for_each_entry(src_ent, &pg->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			sg_mp = br_mdb_ip_get(br, &sg_ip);
			if (!sg_mp)
				continue;
			sg_mp->host_joined = star_mp->host_joined;
		}
	}
}

static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;

	/* *,G exclude ports are only added to S,G entries */
	if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
		return;

	/* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports;
	 * we should ignore perm entries since they're managed by user-space
	 */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;
	     pp = &p->next)
		if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
				  MDB_PG_FLAGS_PERMANENT)))
			return;

	/* currently the host can only have joined the *,G which means
	 * we treat it as EXCLUDE {}, so for an S,G it's considered a
	 * STAR_EXCLUDE entry and we can safely leave it
	 */
	sgmp->host_joined = false;

	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
		if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(sgmp, p, pp);
		else
			pp = &p->next;
	}
}

void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *pg;
	struct net_bridge_mcast *brmctx;

	if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
		return;
	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	br_multicast_sg_host_state(star_mp, sg);
	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = sg->key.addr;
	/* we need to add all exclude ports to the S,G */
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_port_group *src_pg;

		if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
			continue;

		sg_key.port = pg->key.port;
		if (br_sg_port_find(br, &sg_key))
			continue;

		pmctx = br_multicast_pg_to_port_ctx(pg);
		if (!pmctx)
			continue;
		brmctx = br_multicast_port_ctx_get_global(pmctx);

		src_pg = __br_multicast_add_group(brmctx, pmctx,
						  &sg->key.addr,
						  sg->eth_addr,
						  MCAST_INCLUDE, false, false);
		if (IS_ERR_OR_NULL(src_pg) ||
		    src_pg->rt_protocol != RTPROT_KERNEL)
			continue;
		src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
	}
}

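/* Install the S,G port group backing a single group source. Forwarding
 * starts blocked if the source timer isn't running, and for
 * kernel-managed entries the S,G timer is stopped since the source
 * entry's own timer now controls its lifetime.
 */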
static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
{
	struct net_bridge_mdb_entry *star_mp;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *sg;
	struct net_bridge_mcast *brmctx;
	struct br_ip sg_ip;

	if (src->flags & BR_SGRP_F_INSTALLED)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	pmctx = br_multicast_pg_to_port_ctx(src->pg);
	if (!pmctx)
		return;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_ip = src->pg->key.addr;
	sg_ip.src = src->addr.src;

	sg = __br_multicast_add_group(brmctx, pmctx, &sg_ip,
				      src->pg->eth_addr, MCAST_INCLUDE, false,
				      !timer_pending(&src->timer));
	if (IS_ERR_OR_NULL(sg))
		return;
	src->flags |= BR_SGRP_F_INSTALLED;
	sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;

	/* if it was added by user-space as perm we can skip next steps */
	if (sg->rt_protocol != RTPROT_KERNEL &&
	    (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* the kernel is now responsible for removing this S,G */
	del_timer(&sg->timer);
	star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
	if (!star_mp)
		return;

	br_multicast_sg_add_exclude_ports(star_mp, sg);
}

static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src,
					bool fastleave)
{
	struct net_bridge_port_group *p, *pg = src->pg;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;
	sg_ip.src = src->addr.src;

	mp = br_mdb_ip_get(src->br, &sg_ip);
	if (!mp)
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, src->br)) != NULL;
	     pp = &p->next) {
		if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
			continue;

		if (p->rt_protocol != RTPROT_KERNEL &&
		    (p->flags & MDB_PG_FLAGS_PERMANENT))
			break;

		if (fastleave)
			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_del_pg(mp, p, pp);
		break;
	}
	src->flags &= ~BR_SGRP_F_INSTALLED;
}

/* install S,G and based on src's timer enable or disable forwarding */
static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *sg;
	u8 old_flags;

	br_multicast_fwd_src_add(src);

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = src->pg->key.addr;
	sg_key.addr.src = src->addr.src;
	sg_key.port = src->pg->key.port;

	sg = br_sg_port_find(src->br, &sg_key);
	if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	old_flags = sg->flags;
	if (timer_pending(&src->timer))
		sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
	else
		sg->flags |= MDB_PG_FLAGS_BLOCKED;

	if (old_flags != sg->flags) {
		struct net_bridge_mdb_entry *sg_mp;

		sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
		if (!sg_mp)
			return;
		br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
	}
}

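/* MDB objects are never freed in place: they are unlinked under
 * multicast_lock, queued on br->mcast_gc_list and destroyed from
 * system_long_wq, where the destructors below can del_timer_sync()
 * and kfree_rcu() safely without holding the lock.
 */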
static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_mdb_entry *mp;

	mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
	WARN_ON(!hlist_unhashed(&mp->mdb_node));
	WARN_ON(mp->ports);

	del_timer_sync(&mp->timer);
	kfree_rcu(mp, rcu);
}

static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
{
	struct net_bridge *br = mp->br;

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_init_rcu(&mp->mdb_node);
	hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}

static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
	    timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);

	if (mp->ports)
		goto out;
	br_multicast_del_mdb_entry(mp);
out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_src *src;

	src = container_of(gc, struct net_bridge_group_src, mcast_gc);
	WARN_ON(!hlist_unhashed(&src->node));

	del_timer_sync(&src->timer);
	kfree_rcu(src, rcu);
}

void br_multicast_del_group_src(struct net_bridge_group_src *src,
				bool fastleave)
{
	struct net_bridge *br = src->pg->key.port->br;

	br_multicast_fwd_src_remove(src, fastleave);
	hlist_del_init_rcu(&src->node);
	src->pg->src_ents--;
	hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}

static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_port_group *pg;

	pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
	WARN_ON(!hlist_unhashed(&pg->mglist));
	WARN_ON(!hlist_empty(&pg->src_list));

	del_timer_sync(&pg->rexmit_timer);
	del_timer_sync(&pg->timer);
	kfree_rcu(pg, rcu);
}

void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
			 struct net_bridge_port_group *pg,
			 struct net_bridge_port_group __rcu **pp)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;

	rcu_assign_pointer(*pp, pg->next);
	hlist_del_init(&pg->mglist);
	br_multicast_eht_clean_sets(pg);
	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		br_multicast_del_group_src(ent, false);
	br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
	if (!br_multicast_is_star_g(&mp->addr)) {
		rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
				       br_sg_port_rht_params);
		br_multicast_sg_del_exclude_ports(mp);
	} else {
		br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
	}
	hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);

	if (!mp->ports && !mp->host_joined && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}

static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		br_multicast_del_pg(mp, pg, pp);
		return;
	}

	WARN_ON(1);
}

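/* Port group membership timer: on expiry an EXCLUDE group falls back to
 * INCLUDE, sources whose timers have stopped are deleted, and the whole
 * port group goes away once no sources remain.
 */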
static void br_multicast_port_group_expired(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, timer);
	struct net_bridge_group_src *src_ent;
	struct net_bridge *br = pg->key.port->br;
	struct hlist_node *tmp;
	bool changed;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	changed = !!(pg->filter_mode == MCAST_EXCLUDE);
	pg->filter_mode = MCAST_INCLUDE;
	hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
		if (!timer_pending(&src_ent->timer)) {
			br_multicast_del_group_src(src_ent, false);
			changed = true;
		}
	}

	if (hlist_empty(&pg->src_list)) {
		br_multicast_find_del_pg(br, pg);
	} else if (changed) {
		struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);

		if (changed && br_multicast_is_star_g(&pg->key.addr))
			br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);

		if (WARN_ON(!mp))
			goto out;
		br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
	}
out:
	spin_unlock(&br->multicast_lock);
}

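/* Run the deferred destructors queued on a gc list (see the mcast_gc
 * work item and br->mcast_gc_list).
 */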
static void br_multicast_gc(struct hlist_head *head)
{
	struct net_bridge_mcast_gc *gcent;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
		hlist_del_init(&gcent->gc_node);
		gcent->destroy(gcent);
	}
}

static void __br_multicast_query_handle_vlan(struct net_bridge_mcast *brmctx,
					     struct net_bridge_mcast_port *pmctx,
					     struct sk_buff *skb)
{
	struct net_bridge_vlan *vlan = NULL;

	if (pmctx && br_multicast_port_ctx_is_vlan(pmctx))
		vlan = pmctx->vlan;
	else if (br_multicast_ctx_is_vlan(brmctx))
		vlan = brmctx->vlan;

	if (vlan && !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
		u16 vlan_proto;

		if (br_vlan_get_proto(brmctx->br->dev, &vlan_proto) != 0)
			return;
		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan->vid);
	}
}

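/* Build a general, group-specific or group-and-source-specific IGMP
 * query. The resulting skb is, roughly:
 *
 *	[ethhdr][iphdr + 4-byte Router Alert option][IGMPv2/v3 query]
 *
 * For IGMPv3 with sources, only sources on the requested side of the
 * last member query time (over_lmqt) that still have retransmissions
 * pending are included.
 */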
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs, bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (brmctx->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			lmqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(brmctx->br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(brmctx->br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (brmctx->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ih->code = (group ? brmctx->multicast_last_member_interval :
				    brmctx->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? brmctx->multicast_last_member_interval :
				      brmctx->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = brmctx->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
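/* MLD counterpart of the IGMP query builder: IPv6 header plus an 8-byte
 * Hop-by-Hop extension carrying the Router Alert option, followed by an
 * MLDv1 or MLDv2 query.
 */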
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (brmctx->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			llqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ip6h->daddr = *ip6_dst;
	if (ipv6_dev_get_saddr(dev_net(brmctx->br->dev), brmctx->br->dev,
			       &ip6h->daddr, 0, &ip6h->saddr)) {
		kfree_skb(skb);
		br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	interval = ipv6_addr_any(group) ?
			brmctx->multicast_query_response_interval :
			brmctx->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (brmctx->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;
		if (!pg || !with_srcs)
			break;

		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
				IPPROTO_ICMPV6,
				csum_partial(csum_start, mld_hdr_size, 0));
	skb_put(skb, mld_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

static struct sk_buff *br_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						struct net_bridge_mcast_port *pmctx,
						struct net_bridge_port_group *pg,
						struct br_ip *ip_dst,
						struct br_ip *group,
						bool with_srcs, bool over_lmqt,
						u8 sflag, u8 *igmp_type,
						bool *need_rexmit)
{
	__be32 ip4_dst;

	switch (group->proto) {
	case htons(ETH_P_IP):
		ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
		return br_ip4_multicast_alloc_query(brmctx, pmctx, pg,
						    ip4_dst, group->dst.ip4,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr ip6_dst;

		if (ip_dst)
			ip6_dst = ip_dst->dst.ip6;
		else
			ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
				      htonl(1));

		return br_ip6_multicast_alloc_query(brmctx, pmctx, pg,
						    &ip6_dst, &group->dst.ip6,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}
#endif
	}
	return NULL;
}

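/* Get or create the MDB entry for a group. Once the table holds
 * hash_max entries, snooping is turned off (BROPT_MULTICAST_ENABLED is
 * cleared) and -E2BIG is returned instead of growing further.
 */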
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (mp)
		return mp;

	if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
		br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
		return ERR_PTR(-E2BIG);
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);
	if (err) {
		kfree(mp);
		mp = ERR_PTR(err);
	} else {
		hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
	}

	return mp;
}

static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
	    timer_pending(&src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src, false);
		if (!hlist_empty(&pg->src_list))
			goto out;
		br_multicast_find_del_pg(br, pg);
	} else {
		br_multicast_fwd_src_handle(src);
	}

out:
	spin_unlock(&br->multicast_lock);
}

struct net_bridge_group_src *
br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
{
	struct net_bridge_group_src *ent;

	switch (ip->proto) {
	case htons(ETH_P_IP):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (ip->src.ip4 == ent->addr.src.ip4)
				return ent;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
				return ent;
		break;
#endif
	}

	return NULL;
}

static struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
	struct net_bridge_group_src *grp_src;

	if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
		return NULL;

	switch (src_ip->proto) {
	case htons(ETH_P_IP):
		if (ipv4_is_zeronet(src_ip->src.ip4) ||
		    ipv4_is_multicast(src_ip->src.ip4))
			return NULL;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (ipv6_addr_any(&src_ip->src.ip6) ||
		    ipv6_addr_is_multicast(&src_ip->src.ip6))
			return NULL;
		break;
#endif
	}

	grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
	if (unlikely(!grp_src))
		return NULL;

	grp_src->pg = pg;
	grp_src->br = pg->key.port->br;
	grp_src->addr = *src_ip;
	grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
	timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);

	hlist_add_head_rcu(&grp_src->node, &pg->src_list);
	pg->src_ents++;

	return grp_src;
}

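/* Allocate and link a new port group; the caller must hold
 * br->multicast_lock. S,G entries are also inserted into the S,G,port
 * rhashtable, and eth_addr is set to the broadcast address unless a
 * specific host source MAC is given (only compared when
 * BR_MULTICAST_TO_UNICAST is enabled on the port).
 */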
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode,
			u8 rt_protocol)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->key.addr = *group;
	p->key.port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	p->rt_protocol = rt_protocol;
	p->eht_host_tree = RB_ROOT;
	p->eht_set_tree = RB_ROOT;
	p->mcast_gc.destroy = br_multicast_destroy_port_group;
	INIT_HLIST_HEAD(&p->src_list);

	if (!br_multicast_is_star_g(group) &&
	    rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
					  br_sg_port_rht_params)) {
		kfree(p);
		return NULL;
	}

	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(&p->mglist, &port->mglist);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(p->eth_addr);

	return p;
}

void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
			    struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined) {
		mp->host_joined = true;
		if (br_multicast_is_star_g(&mp->addr))
			br_multicast_star_g_host_state(mp);
		if (notify)
			br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
	}

	if (br_group_is_l2(&mp->addr))
		return;

	mod_timer(&mp->timer, jiffies + brmctx->multicast_membership_interval);
}

void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined)
		return;

	mp->host_joined = false;
	if (br_multicast_is_star_g(&mp->addr))
		br_multicast_star_g_host_state(mp);
	if (notify)
		br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
}

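/* Core join handler. A NULL pmctx means the bridge itself joined the
 * group (host join); otherwise the port group is found or created in
 * the entry's port list, which is kept ordered by port pointer. For
 * IGMPv2/MLDv1 reports the membership timer is (re)armed since those
 * protocol versions carry no per-source state.
 */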
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p = NULL;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_multicast_new_group(brmctx->br, group);
	if (IS_ERR(mp))
		return ERR_CAST(mp);

	if (!pmctx) {
		br_multicast_host_join(brmctx, mp, true);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, pmctx->port, src))
			goto found;
		if ((unsigned long)p->key.port < (unsigned long)pmctx->port)
			break;
	}

	p = br_multicast_new_port_group(pmctx->port, group, *pp, 0, src,
					filter_mode, RTPROT_KERNEL);
	if (unlikely(!p)) {
		p = ERR_PTR(-ENOMEM);
		goto out;
	}
	rcu_assign_pointer(*pp, p);
	if (blocked)
		p->flags |= MDB_PG_FLAGS_BLOCKED;
	br_mdb_notify(brmctx->br->dev, mp, p, RTM_NEWMDB);

found:
	if (igmpv2_mldv1)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);

out:
	return p;
}

static int br_multicast_add_group(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct br_ip *group,
				  const unsigned char *src,
				  u8 filter_mode,
				  bool igmpv2_mldv1)
{
	struct net_bridge_port_group *pg;
	int err;

	spin_lock(&brmctx->br->multicast_lock);
	pg = __br_multicast_add_group(brmctx, pmctx, group, src, filter_mode,
				      igmpv2_mldv1, false);
	/* NULL is considered valid for host joined groups */
	err = PTR_ERR_OR_ZERO(pg);
	spin_unlock(&brmctx->br->multicast_lock);

	return err;
}

static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      __be32 group,
				      __u16 vid,
				      const unsigned char *src,
				      bool igmpv2)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv4_is_local_multicast(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;
	filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      filter_mode, igmpv2);
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      const struct in6_addr *group,
				      __u16 vid,
				      const unsigned char *src,
				      bool mldv1)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;
	filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      filter_mode, mldv1);
}
#endif

static bool br_multicast_rport_del(struct hlist_node *rlist)
{
	if (hlist_unhashed(rlist))
		return false;

	hlist_del_init_rcu(rlist);
	return true;
}

static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
	return br_multicast_rport_del(&pmctx->ip4_rlist);
}

static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	return br_multicast_rport_del(&pmctx->ip6_rlist);
#else
	return false;
#endif
}

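/* Expiry of a learned (temporary) multicast router port; ports
 * configured as disabled or permanent never get past the checks below.
 */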
static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx,
					struct timer_list *t,
					struct hlist_node *rlist)
{
	struct net_bridge *br = pmctx->port->br;
	bool del;

	spin_lock(&br->multicast_lock);
	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(t))
		goto out;

	del = br_multicast_rport_del(rlist);
	br_multicast_rport_del_notify(pmctx, del);
out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist);
}
#endif

static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}

static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx,
					      struct timer_list *timer)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    brmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    br_ip4_multicast_is_router(brmctx) ||
	    br_ip6_multicast_is_router(brmctx))
		goto out;

	br_mc_router_state_change(brmctx->br, false);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
#endif

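/* The other-querier-present timer ran out: no foreign querier has been
 * heard for the timeout, so resume sending our own queries if snooping
 * is still enabled.
 */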
static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (!netif_running(brmctx->br->dev) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(brmctx, query);

out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query);
}
#endif

static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}

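/* Transmit one query. When sflag is set (suppress router-side
 * processing), the source list is first split at the last member query
 * time: sources over LMQT are sent with the S flag and the rest are
 * re-sent without it via the again_under_lmqt loop, loosely following
 * RFC 3376 6.6.3.1 and RFC 3810 7.6.3.
 */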
static void __br_multicast_send_query(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

again_under_lmqt:
	skb = br_multicast_alloc_query(brmctx, pmctx, pg, ip_dst, group,
				       with_srcs, over_lmqt, sflag, &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (pmctx) {
		skb->dev = pmctx->port->dev;
		br_multicast_count(brmctx->br, pmctx->port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(pmctx->port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(brmctx, group, skb);
		br_multicast_count(brmctx->br, NULL, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}

static void br_multicast_send_query(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct br_ip br_group;
	unsigned long time;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    !brmctx->multicast_querier)
		return;

	memset(&br_group.dst, 0, sizeof(br_group.dst));

	if (pmctx ? (own_query == &pmctx->ip4_own_query) :
		    (own_query == &brmctx->ip4_own_query)) {
		other_query = &brmctx->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &brmctx->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &br_group, false,
				  0, NULL);

	time = jiffies;
	time += own_query->startup_sent < brmctx->multicast_startup_query_count ?
		brmctx->multicast_startup_query_interval :
		brmctx->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}

static void
br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	spin_lock(&br->multicast_lock);
	if (br_multicast_port_ctx_state_stopped(pmctx))
		goto out;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, pmctx, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query);
}
#endif

static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	bool need_rexmit = false;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		goto out;

	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		goto out;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!brmctx->multicast_querier)
		goto out;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (!other_query || timer_pending(&other_query->timer))
		goto out;

	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 1, NULL);
	}
	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 0, &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(&pg->rexmit_timer, jiffies +
					     brmctx->multicast_last_member_interval);
out:
	spin_unlock(&br->multicast_lock);
}

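/* Tell switchdev drivers that multicast snooping was toggled; callers
 * tolerate -EOPNOTSUPP since not all drivers implement the attribute.
 */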
static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
		.flags = SWITCHDEV_F_DEFER,
		.u.mc_disabled = !value,
	};

	return switchdev_port_attr_set(dev, &attr, extack);
}

void br_multicast_port_ctx_init(struct net_bridge_port *port,
				struct net_bridge_vlan *vlan,
				struct net_bridge_mcast_port *pmctx)
{
	pmctx->port = port;
	pmctx->vlan = vlan;
	pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	timer_setup(&pmctx->ip4_mc_router_timer,
		    br_ip4_multicast_router_expired, 0);
	timer_setup(&pmctx->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&pmctx->ip6_mc_router_timer,
		    br_ip6_multicast_router_expired, 0);
	timer_setup(&pmctx->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
}

void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&pmctx->ip6_mc_router_timer);
#endif
	del_timer_sync(&pmctx->ip4_mc_router_timer);
}

int br_multicast_add_port(struct net_bridge_port *port)
{
	int err;

	port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
	br_multicast_port_ctx_init(port, NULL, &port->multicast_ctx);

	err = br_mc_disabled_update(port->dev,
				    br_opt_get(port->br,
					       BROPT_MULTICAST_ENABLED),
				    NULL);
	if (err && err != -EOPNOTSUPP)
		return err;

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}

void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	HLIST_HEAD(deleted_head);
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);
	br_multicast_gc(&deleted_head);
	br_multicast_port_ctx_deinit(&port->multicast_ctx);
	free_percpu(port->mcast_stats);
}

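/* Restart the own-query cycle from scratch: reset the startup-query
 * counter and, if the timer can be (re)claimed, fire it immediately.
 */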
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}

static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !netif_running(br->dev))
		return;

	br_multicast_enable(&pmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&pmctx->ip6_own_query);
#endif
	if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) {
		br_ip4_multicast_add_router(brmctx, pmctx);
		br_ip6_multicast_add_router(brmctx, pmctx);
	}
}

void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock_bh(&br->multicast_lock);
	__br_multicast_enable_port_ctx(&port->multicast_ctx);
	spin_unlock_bh(&br->multicast_lock);
}

static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge_port_group *pg;
	struct hlist_node *n;
	bool del = false;

	hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) &&
		    (!br_multicast_port_ctx_is_vlan(pmctx) ||
		     pg->key.addr.vid == pmctx->vlan->vid))
			br_multicast_find_del_pg(pmctx->port->br, pg);

	del |= br_ip4_multicast_rport_del(pmctx);
	del_timer(&pmctx->ip4_mc_router_timer);
	del_timer(&pmctx->ip4_own_query.timer);
	del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&pmctx->ip6_mc_router_timer);
	del_timer(&pmctx->ip6_own_query.timer);
#endif
	br_multicast_rport_del_notify(pmctx, del);
}

void br_multicast_disable_port(struct net_bridge_port *port)
{
	spin_lock_bh(&port->br->multicast_lock);
	__br_multicast_disable_port_ctx(&port->multicast_ctx);
	spin_unlock_bh(&port->br->multicast_lock);
}

static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;
	int deleted = 0;

	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		if (ent->flags & BR_SGRP_F_DELETE) {
			br_multicast_del_group_src(ent, false);
			deleted++;
		}

	return deleted;
}

static void __grp_src_mod_timer(struct net_bridge_group_src *src,
				unsigned long expires)
{
	mod_timer(&src->timer, expires);
	br_multicast_fwd_src_handle(src);
}

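/* Build and send a group-and-source specific query (roughly RFC 3376
 * 6.6.3.2 / RFC 3810 7.6.3.2): sources marked BR_SGRP_F_SEND get their
 * timers lowered to LMQT, and the query itself is emitted only when this
 * bridge is the active querier, i.e. no other-querier timer is running.
 */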
static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx,
					      struct net_bridge_mcast_port *pmctx,
					      struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	u32 lmqc = brmctx->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	lmqt = now + br_multicast_lmqt(brmctx);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			if (ent->timer.expires > lmqt) {
				if (brmctx->multicast_querier &&
				    other_query &&
				    !timer_pending(&other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				__grp_src_mod_timer(ent, lmqt);
			}
		}
	}

	if (!brmctx->multicast_querier ||
	    !other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 1, NULL);

	lmi = now + brmctx->multicast_last_member_interval;
	if (!timer_pending(&pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(&pg->rexmit_timer, lmi);
}

static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	unsigned long now = jiffies, lmi;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (brmctx->multicast_querier &&
	    other_query && !timer_pending(&other_query->timer)) {
		lmi = now + brmctx->multicast_last_member_interval;
		pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 0, NULL);
		if (!timer_pending(&pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(&pg->rexmit_timer, lmi);
	}

	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(&pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx))))
		mod_timer(&pg->timer, now + br_multicast_lmqt(brmctx));
}

/* State          Msg type      New state                Actions
 * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)            (B)=GMI
 * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)            (B)=GMI
 * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A)        (A)=GMI
 */
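/* Example: a group in INCLUDE(S1) that receives ALLOW(S1,S2) stays in
 * INCLUDE mode with sources {S1,S2}; S2 is created below and both source
 * timers are armed to the Group Membership Interval (br_multicast_gmi()).
 */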
static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx,
				     struct net_bridge_port_group *pg, void *h_addr,
				     void *srcs, u32 nsrcs, size_t addr_size,
				     int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}

		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	return changed;
}

/* State          Msg type      New state                Actions
 * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A)        (B-A)=0
 *                                                       Delete (A-B)
 *                                                       Group Timer=GMI
 */
static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	struct br_ip src_ip;
	u32 src_idx;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent)
			ent->flags &= ~BR_SGRP_F_DELETE;
		else
			ent = br_multicast_new_group_src(pg, &src_ip);
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
}

/* State          Msg type      New state                Actions
 * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A)        (A-X-Y)=GMI
 *                                                       Delete (X-A)
 *                                                       Delete (Y-A)
 *                                                       Group Timer=GMI
 */
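/* Example: EXCLUDE(X={S1},Y={S2}) receiving IS_EX({S2,S3}) becomes
 * EXCLUDE(X={S3},Y={S2}); S1 is absent from the report so it is deleted,
 * S3 is created with its timer set to GMI, and the blocked S2 (stopped
 * timer) is left untouched.
 */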
static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent,
						    now + br_multicast_gmi(brmctx));
				changed = true;
			}
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}

static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size,
			       int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				     grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs,
					       addr_size, grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}

/* State          Msg type      New state                Actions
 * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)            (B)=GMI
 *                                                       Send Q(G,A-B)
 */
static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_SEND;
			to_send--;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* State          Msg type      New state                Actions
 * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A)        (A)=GMI
 *                                                       Send Q(G,X-A)
 *                                                       Send Q(G)
 */
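/* Example: EXCLUDE(X={S1},Y={S2}) receiving TO_IN({S3}) arms S3 to GMI
 * and then queries for what may be going away: a group-and-source query
 * for the still-forwarded S1 and a group-specific query for G itself, so
 * remaining EXCLUDE-mode listeners get a chance to refute the change.
 */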
static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		if (timer_pending(&ent->timer))
			ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			if (timer_pending(&ent->timer)) {
				ent->flags &= ~BR_SGRP_F_SEND;
				to_send--;
			}
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	__grp_send_query_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

static bool br_multicast_toin(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	if (br_multicast_eht_should_del_pg(pg)) {
		pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}

/* State          Msg type      New state                Actions
 * INCLUDE (A)    TO_EX (B)     EXCLUDE (A*B,B-A)        (B-A)=0
 *                                                       Delete (A-B)
 *                                                       Send Q(G,A*B)
 *                                                       Group Timer=GMI
 */
static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
				     BR_SGRP_F_SEND;
			to_send++;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
		}
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
}

/* State          Msg type      New state                Actions
 * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A)        (A-X-Y)=Group Timer
 *                                                       Delete (X-A)
 *                                                       Delete (Y-A)
 *                                                       Send Q(G,A-Y)
 *                                                       Group Timer=GMI
 */
static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

static bool br_multicast_toex(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_toex_incl(brmctx, pmctx, pg, h_addr, srcs, nsrcs,
				    addr_size, grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toex_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}

/* State          Msg type      New state                Actions
 * INCLUDE (A)    BLOCK (B)     INCLUDE (A)              Send Q(G,A*B)
 */
static bool __grp_src_block_incl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* State          Msg type      New state                Actions
 * EXCLUDE (X,Y)  BLOCK (A)     EXCLUDE (X+(A-Y),Y)      (A-X-Y)=Group Timer
 *                                                       Send Q(G,A-Y)
 */
static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

static bool br_multicast_block(struct net_bridge_mcast *brmctx,
			       struct net_bridge_mcast_port *pmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_block_excl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	}

	if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
	    br_multicast_eht_should_del_pg(pg)) {
		if (br_multicast_eht_should_del_pg(pg))
			pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}

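/* Linear scan of an mdb entry's port group list for the entry matching
 * both the bridge port and the reporting source address (see
 * br_port_group_equal()).
 */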
static struct net_bridge_port_group *
br_multicast_find_port(struct net_bridge_mdb_entry *mp,
		       struct net_bridge_port *p,
		       const unsigned char *src)
{
	struct net_bridge *br __maybe_unused = mp->br;
	struct net_bridge_port_group *pg;

	for (pg = mlock_dereference(mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br))
		if (br_port_group_equal(pg, p, src))
			return pg;

	return NULL;
}

static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 struct sk_buff *skb,
					 u16 vid)
{
	bool igmpv2 = brmctx->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	__be32 group, *h_addr;
	bool changed = false;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			if (!pmctx || igmpv2) {
				br_ip4_multicast_leave_group(brmctx, pmctx,
							     group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(brmctx, pmctx, group,
							 vid, src, igmpv2);
			if (err)
				break;
		}

		if (!pmctx || igmpv2)
			continue;

		spin_lock_bh(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip4_get(brmctx->br, group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec and host addr */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		h_addr = &ip_hdr(skb)->saddr;
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock_bh(&brmctx->br->multicast_lock);
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = brmctx->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	const unsigned char *src;
	struct icmp6hdr *icmp6h;
	struct in6_addr *h_addr;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = skb_transport_offset(skb) + sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			if (!pmctx || mldv1) {
				br_ip6_multicast_leave_group(brmctx, pmctx,
							     &grec->grec_mca,
							     vid, src);
				continue;
			}
		} else {
			err = br_ip6_multicast_add_group(brmctx, pmctx,
							 &grec->grec_mca, vid,
							 src, mldv1);
			if (err)
				break;
		}

		if (!pmctx || mldv1)
			continue;

		spin_lock_bh(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip6_get(brmctx->br, &grec->grec_mca, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		h_addr = &ipv6_hdr(skb)->saddr;
		switch (grec->grec_type) {
		case MLD2_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		case MLD2_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock_bh(&brmctx->br->multicast_lock);
	}

	return err;
}
#endif

static bool br_ip4_multicast_select_querier(struct net_bridge_mcast *brmctx,
					    struct net_bridge_mcast_port *pmctx,
					    __be32 saddr)
{
	struct net_bridge_port *port = pmctx ? pmctx->port : NULL;

	if (!timer_pending(&brmctx->ip4_own_query.timer) &&
	    !timer_pending(&brmctx->ip4_other_query.timer))
		goto update;

	if (!brmctx->ip4_querier.addr.src.ip4)
		goto update;

	if (ntohl(saddr) <= ntohl(brmctx->ip4_querier.addr.src.ip4))
		goto update;

	return false;

update:
	brmctx->ip4_querier.addr.src.ip4 = saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(brmctx->ip4_querier.port, port);

	return true;
}

#if IS_ENABLED(CONFIG_IPV6)
static bool br_ip6_multicast_select_querier(struct net_bridge_mcast *brmctx,
					    struct net_bridge_mcast_port *pmctx,
					    struct in6_addr *saddr)
{
	struct net_bridge_port *port = pmctx ? pmctx->port : NULL;

	if (!timer_pending(&brmctx->ip6_own_query.timer) &&
	    !timer_pending(&brmctx->ip6_other_query.timer))
		goto update;

	if (ipv6_addr_cmp(saddr, &brmctx->ip6_querier.addr.src.ip6) <= 0)
		goto update;

	return false;

update:
	brmctx->ip6_querier.addr.src.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(brmctx->ip6_querier.port, port);

	return true;
}
#endif

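/* We heard a better querier: remember when its presence may time out
 * (delay_time) and keep the other-querier timer running for another
 * multicast_querier_interval from now.
 */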
static void
br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
}

static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}

static struct net_bridge_port *
br_multicast_rport_from_node(struct net_bridge_mcast *brmctx,
			     struct hlist_head *mc_router_list,
			     struct hlist_node *rlist)
{
	struct net_bridge_mcast_port *pmctx;

#if IS_ENABLED(CONFIG_IPV6)
	if (mc_router_list == &brmctx->ip6_mc_router_list)
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip6_rlist);
	else
#endif
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip4_rlist);

	return pmctx->port;
}

static struct hlist_node *
br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx,
			    struct net_bridge_port *port,
			    struct hlist_head *mc_router_list)
{
	struct hlist_node *slot = NULL;
	struct net_bridge_port *p;
	struct hlist_node *rlist;

	hlist_for_each(rlist, mc_router_list) {
		p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist);

		if ((unsigned long)port >= (unsigned long)p)
			break;

		slot = rlist;
	}

	return slot;
}

static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
					   struct hlist_node *rnode)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (rnode != &pmctx->ip6_rlist)
		return hlist_unhashed(&pmctx->ip6_rlist);
	else
		return hlist_unhashed(&pmctx->ip4_rlist);
#else
	return true;
#endif
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct hlist_node *rlist,
				    struct hlist_head *mc_router_list)
{
	struct hlist_node *slot;

	if (!hlist_unhashed(rlist))
		return;

	slot = br_multicast_get_rport_slot(brmctx, pmctx->port, mc_router_list);

	if (slot)
		hlist_add_behind_rcu(rlist, slot);
	else
		hlist_add_head_rcu(rlist, mc_router_list);

	/* For backwards compatibility for now, only notify if we
	 * switched from no IPv4/IPv6 multicast router to a new
	 * IPv4 or IPv6 multicast router.
	 */
	if (br_multicast_no_router_otherpf(pmctx, rlist)) {
		br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_NEWMDB);
		br_port_mc_router_state_change(pmctx->port, true);
	}
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip4_rlist,
				&brmctx->ip4_mc_router_list);
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip6_rlist,
				&brmctx->ip6_mc_router_list);
#endif
}

static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct timer_list *timer,
				     struct hlist_node *rlist,
				     struct hlist_head *mc_router_list)
{
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		return;

	if (!pmctx) {
		if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			if (!br_ip4_multicast_is_router(brmctx) &&
			    !br_ip6_multicast_is_router(brmctx))
				br_mc_router_state_change(brmctx->br, true);
			mod_timer(timer, now + brmctx->multicast_querier_interval);
		}
		return;
	}

	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
	mod_timer(timer, now + brmctx->multicast_querier_interval);
}

static void br_ip4_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
	struct timer_list *timer = &brmctx->ip4_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip4_mc_router_timer;
		rlist = &pmctx->ip4_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip4_mc_router_list);
}

static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct timer_list *timer = &brmctx->ip6_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip6_mc_router_timer;
		rlist = &pmctx->ip6_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip6_mc_router_list);
#endif
}

static void
br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_ip4_multicast_select_querier(brmctx, pmctx, saddr->src.ip4))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip4_multicast_mark_router(brmctx, pmctx);
}

#if IS_ENABLED(CONFIG_IPV6)
static void
br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_ip6_multicast_select_querier(brmctx, pmctx, &saddr->src.ip6))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip6_multicast_mark_router(brmctx, pmctx);
}
#endif

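/* Handle an incoming IGMP query. The variant is inferred from the
 * transport length (sizeof(struct igmphdr) means v1/v2, anything at
 * least sizeof(struct igmpv3_query) means v3) and max_delay is decoded
 * accordingly; a general query updates the other-querier state, while a
 * group-specific query lowers the matching group and port timers.
 */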
static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
				   struct net_bridge_mcast_port *pmctx,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs ||
		    (brmctx->multicast_igmp_version == 3 && group &&
		     ih3->suppress))
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		saddr.proto = htons(ETH_P_IP);
		saddr.src.ip4 = iph->saddr;

		br_ip4_multicast_query_received(brmctx, pmctx,
						&brmctx->ip4_other_query,
						&saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;

	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	if (transport_len == sizeof(*mld)) {
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		if (brmctx->multicast_mld_version == 2 &&
		    !ipv6_addr_any(&mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 = ipv6_hdr(skb)->saddr;

		br_ip6_multicast_query_received(brmctx, pmctx,
						&brmctx->ip6_other_query,
						&saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
	return err;
}
#endif

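/* Common leave handling. With fast-leave on the port the group entry is
 * removed immediately; otherwise, if we are the querier, a last-member
 * query is sent and the remaining membership timers are shortened to
 * multicast_last_member_count * multicast_last_member_interval so the
 * entry expires quickly unless another report refreshes it.
 */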
static void
br_multicast_leave_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_mdb_ip_get(brmctx->br, group);
	if (!mp)
		goto out;

	if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	if (timer_pending(&other_query->timer))
		goto out;

	if (brmctx->multicast_querier) {
		__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + brmctx->multicast_last_member_count *
				 brmctx->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, brmctx->br);
		     p != NULL && pmctx != NULL;
		     p = mlock_dereference(p->next, brmctx->br)) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + brmctx->multicast_last_member_count *
		     brmctx->multicast_last_member_interval;

	if (!pmctx) {
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, brmctx->br);
	     p != NULL;
	     p = mlock_dereference(p->next, brmctx->br)) {
		if (p->key.port != pmctx->port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip4_other_query,
				 own_query, src);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = pmctx ? &pmctx->ip6_own_query : &brmctx->ip6_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip6_other_query,
				 own_query, src);
}
#endif

static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

static void br_multicast_pim(struct net_bridge_mcast *brmctx,
			     struct net_bridge_mcast_port *pmctx,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}

static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct sk_buff *skb)
{
	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
		return -ENOMSG;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);

	return 0;
}

static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(brmctx, pmctx, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(brmctx, pmctx, ih->group, vid,
						 src, true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(brmctx, pmctx, ih->group, vid, src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct sk_buff *skb)
{
	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip6_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}

static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG || err == -ENODATA) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		if (err == -ENODATA &&
		    ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
			br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb);

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(brmctx, pmctx, &mld->mld_mca,
						 vid, src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(brmctx, pmctx, &mld->mld_mca, vid,
					     src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif

int br_multicast_rcv(struct net_bridge_mcast **brmctx,
		     struct net_bridge_mcast_port **pmctx,
		     struct net_bridge_vlan *vlan,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get((*brmctx)->br, BROPT_MULTICAST_ENABLED))
		return 0;

	if (br_opt_get((*brmctx)->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) && vlan) {
		const struct net_bridge_vlan *masterv;

		/* the vlan has the master flag set only when transmitting
		 * through the bridge device
		 */
		if (br_vlan_is_master(vlan)) {
			masterv = vlan;
			*brmctx = &vlan->br_mcast_ctx;
			*pmctx = NULL;
		} else {
			masterv = vlan->brvlan;
			*brmctx = &vlan->brvlan->br_mcast_ctx;
			*pmctx = &vlan->port_mcast_ctx;
		}

		if (!(masterv->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
			return 0;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(*brmctx, *pmctx, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(*brmctx, *pmctx, skb, vid);
		break;
#endif
	}

	return ret;
}

static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (br_multicast_ctx_vlan_disabled(brmctx))
		goto out;

	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(brmctx, NULL, query);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip4_own_query,
				   &brmctx->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip6_own_query,
				   &brmctx->ip6_querier);
}
#endif

static void br_multicast_gc_work(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     mcast_gc_work);
	HLIST_HEAD(deleted_head);

	spin_lock_bh(&br->multicast_lock);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_gc(&deleted_head);
}

void br_multicast_ctx_init(struct net_bridge *br,
			   struct net_bridge_vlan *vlan,
			   struct net_bridge_mcast *brmctx)
{
	brmctx->br = br;
	brmctx->vlan = vlan;
	brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	brmctx->multicast_last_member_count = 2;
	brmctx->multicast_startup_query_count = 2;

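	/* The defaults below track the RFC 2236/3376 timer values: with
	 * robustness 2, a 125s query interval and a 10s max response time,
	 * the group membership interval works out to 2 * 125s + 10s = 260s
	 * and the other-querier-present interval to 2 * 125s + 10s / 2 = 255s.
	 */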
	brmctx->multicast_last_member_interval = HZ;
	brmctx->multicast_query_response_interval = 10 * HZ;
	brmctx->multicast_startup_query_interval = 125 * HZ / 4;
	brmctx->multicast_query_interval = 125 * HZ;
	brmctx->multicast_querier_interval = 255 * HZ;
	brmctx->multicast_membership_interval = 260 * HZ;

	brmctx->ip4_other_query.delay_time = 0;
	brmctx->ip4_querier.port = NULL;
	brmctx->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	brmctx->multicast_mld_version = 1;
	brmctx->ip6_other_query.delay_time = 0;
	brmctx->ip6_querier.port = NULL;
#endif

	timer_setup(&brmctx->ip4_mc_router_timer,
		    br_ip4_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&brmctx->ip6_mc_router_timer,
		    br_ip6_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
}

void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
{
	__br_multicast_stop(brmctx);
}

void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br_multicast_ctx_init(br, NULL, &br->multicast_ctx);

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->mcast_gc_list);
	INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}

static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (!in_dev)
		return;

	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif

void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}

static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif

void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}

static void __br_multicast_open_query(struct net_bridge *br,
				      struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	mod_timer(&query->timer, jiffies);
}

static void __br_multicast_open(struct net_bridge_mcast *brmctx)
{
	__br_multicast_open_query(brmctx->br, &brmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open_query(brmctx->br, &brmctx->ip6_own_query);
#endif
}

void br_multicast_open(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_open(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_open(&br->multicast_ctx);
	}
}

static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
{
	del_timer_sync(&brmctx->ip4_mc_router_timer);
	del_timer_sync(&brmctx->ip4_other_query.timer);
	del_timer_sync(&brmctx->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&brmctx->ip6_mc_router_timer);
	del_timer_sync(&brmctx->ip6_other_query.timer);
	del_timer_sync(&brmctx->ip6_own_query.timer);
#endif
}

void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge *br;

	/* it's okay to check for the flag without the multicast lock because it
	 * can only change under RTNL -> multicast_lock, we need the latter to
	 * sync with timers and packets
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED))
		return;

	if (br_vlan_is_master(vlan)) {
		br = vlan->br;

		if (!br_vlan_is_brentry(vlan) ||
		    (on &&
		     br_multicast_ctx_vlan_global_disabled(&vlan->br_mcast_ctx)))
			return;

		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		spin_unlock_bh(&br->multicast_lock);

		if (on)
			__br_multicast_open(&vlan->br_mcast_ctx);
		else
			__br_multicast_stop(&vlan->br_mcast_ctx);
	} else {
		struct net_bridge_mcast *brmctx;

		brmctx = br_multicast_port_ctx_get_global(&vlan->port_mcast_ctx);
		if (on && br_multicast_ctx_vlan_global_disabled(brmctx))
			return;

		br = vlan->port->br;
		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		if (on)
			__br_multicast_enable_port_ctx(&vlan->port_mcast_ctx);
		else
			__br_multicast_disable_port_ctx(&vlan->port_mcast_ctx);
		spin_unlock_bh(&br->multicast_lock);
	}
}

void br_multicast_toggle_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge_port *p;

	if (WARN_ON_ONCE(!br_vlan_is_master(vlan)))
		return;

	list_for_each_entry(p, &vlan->br->port_list, list) {
		struct net_bridge_vlan *vport;

		vport = br_vlan_find(nbp_vlan_group(p), vlan->vid);
		if (!vport)
			continue;
		br_multicast_toggle_one_vlan(vport, on);
	}
}

int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
				      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	struct net_bridge_port *p;

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) == on)
		return 0;

	if (on && !br_opt_get(br, BROPT_VLAN_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot enable multicast vlan snooping with vlan filtering disabled");
		return -EINVAL;
	}

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	br_opt_toggle(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED, on);

	/* disable/enable non-vlan mcast contexts based on vlan snooping */
	if (on)
		__br_multicast_stop(&br->multicast_ctx);
	else
		__br_multicast_open(&br->multicast_ctx);
	list_for_each_entry(p, &br->port_list, list) {
		if (on)
			br_multicast_disable_port(p);
		else
			br_multicast_enable_port(p);
	}

	list_for_each_entry(vlan, &vg->vlan_list, vlist)
		br_multicast_toggle_vlan(vlan, on);

	return 0;
}

bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on)
{
	ASSERT_RTNL();

	/* BR_VLFLAG_GLOBAL_MCAST_ENABLED relies on eventual consistency and
	 * requires only RTNL to change
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
		return false;

	vlan->priv_flags ^= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	br_multicast_toggle_vlan(vlan, on);

	return true;
}

void br_multicast_stop(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_stop(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_stop(&br->multicast_ctx);
	}
}

void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	HLIST_HEAD(deleted_head);
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
		br_multicast_del_mdb_entry(mp);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_ctx_deinit(&br->multicast_ctx);
	br_multicast_gc(&deleted_head);
	cancel_work_sync(&br->mcast_gc_work);

	rcu_barrier();
}

int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_mcast *brmctx = &br->multicast_ctx;
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
		del_timer(&brmctx->ip4_mc_router_timer);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&brmctx->ip6_mc_router_timer);
#endif
		brmctx->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(br, false);
		brmctx->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}

static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted)
{
	if (!deleted)
		return;

	/* For backwards compatibility for now, only notify if there is
	 * no multicast router anymore for both IPv4 and IPv6.
	 */
	if (!hlist_unhashed(&pmctx->ip4_rlist))
		return;
#if IS_ENABLED(CONFIG_IPV6)
	if (!hlist_unhashed(&pmctx->ip6_rlist))
		return;
#endif

	br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_DELMDB);
	br_port_mc_router_state_change(pmctx->port, false);

	/* don't allow timer refresh */
	if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP)
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}

int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge_mcast *brmctx = &p->br->multicast_ctx;
	struct net_bridge_mcast_port *pmctx = &p->multicast_ctx;
	unsigned long now = jiffies;
	int err = -EINVAL;
	bool del = false;

	spin_lock(&p->br->multicast_lock);
	if (pmctx->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) {
			mod_timer(&pmctx->ip4_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#if IS_ENABLED(CONFIG_IPV6)
			mod_timer(&pmctx->ip6_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#endif
		}
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		pmctx->multicast_router = MDB_RTR_TYPE_DISABLED;
		del |= br_ip4_multicast_rport_del(pmctx);
		del_timer(&pmctx->ip4_mc_router_timer);
		del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&pmctx->ip6_mc_router_timer);
#endif
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		del |= br_ip4_multicast_rport_del(pmctx);
		del |= br_ip6_multicast_rport_del(pmctx);
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_PERM:
		pmctx->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&pmctx->ip4_mc_router_timer);
		br_ip4_multicast_add_router(brmctx, pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&pmctx->ip6_mc_router_timer);
#endif
		br_ip6_multicast_add_router(brmctx, pmctx);
		break;
	case MDB_RTR_TYPE_TEMP:
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP;
		br_ip4_multicast_mark_router(brmctx, pmctx);
		br_ip6_multicast_mark_router(brmctx, pmctx);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&p->br->multicast_lock);

	return err;
}

static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	if (!br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

	__br_multicast_open_query(brmctx->br, query);

	rcu_read_lock();
	list_for_each_entry_rcu(port, &brmctx->br->port_list, list) {
		struct bridge_mcast_own_query *ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
		struct bridge_mcast_own_query *ip6_own_query;
#endif

		if (br_multicast_port_ctx_state_stopped(&port->multicast_ctx))
			continue;

		if (br_multicast_ctx_is_vlan(brmctx)) {
			struct net_bridge_vlan *vlan;

			vlan = br_vlan_find(nbp_vlan_group(port), brmctx->vlan->vid);
			if (!vlan ||
			    br_multicast_port_ctx_state_stopped(&vlan->port_mcast_ctx))
				continue;

			ip4_own_query = &vlan->port_mcast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &vlan->port_mcast_ctx.ip6_own_query;
#endif
		} else {
			ip4_own_query = &port->multicast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &port->multicast_ctx.ip6_own_query;
#endif
		}

		if (query == &brmctx->ip4_own_query)
			br_multicast_enable(ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(ip6_own_query);
#endif
	}
	rcu_read_unlock();
}

int br_multicast_toggle(struct net_bridge *br, unsigned long val,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_port *port;
	bool change_snoopers = false;
	int err = 0;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	err = br_mc_disabled_update(br->dev, val, extack);
	if (err == -EOPNOTSUPP)
		err = 0;
	if (err)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		change_snoopers = true;
		goto unlock;
	}

	if (!netif_running(br->dev))
		goto unlock;

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port_ctx(&port->multicast_ctx);

	change_snoopers = true;

unlock:
	spin_unlock_bh(&br->multicast_lock);

	/* br_multicast_join_snoopers has the potential to cause
	 * an MLD Report/Leave to be delivered to br_multicast_rcv,
	 * which would in turn call br_multicast_add_group, which would
	 * attempt to acquire multicast_lock. This function should be
	 * called after the lock has been released to avoid deadlocks on
	 * multicast_lock.
	 *
	 * br_multicast_leave_snoopers does not have the problem since
	 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
	 * returns without calling br_multicast_ipv4/6_rcv if it's not
	 * enabled. Moved both functions out just for symmetry.
	 */
	if (change_snoopers) {
		if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
			br_multicast_join_snoopers(br);
		else
			br_multicast_leave_snoopers(br);
	}

	return err;
}

bool br_multicast_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
}
EXPORT_SYMBOL_GPL(br_multicast_enabled);

bool br_multicast_router(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	bool is_router;

	spin_lock_bh(&br->multicast_lock);
	is_router = br_multicast_is_router(&br->multicast_ctx, NULL);
	spin_unlock_bh(&br->multicast_lock);
	return is_router;
}
EXPORT_SYMBOL_GPL(br_multicast_router);

int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_mcast *brmctx = &br->multicast_ctx;
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (brmctx->multicast_querier == val)
		goto unlock;

	WRITE_ONCE(brmctx->multicast_querier, val);
	if (!val)
		goto unlock;

	max_delay = brmctx->multicast_query_response_interval;

	if (!timer_pending(&brmctx->ip4_other_query.timer))
		brmctx->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&brmctx->ip6_other_query.timer))
		brmctx->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

int br_multicast_set_igmp_version(struct net_bridge_mcast *brmctx,
				  unsigned long val)
{
	/* Currently we support only version 2 and 3 */
	switch (val) {
	case 2:
	case 3:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&brmctx->br->multicast_lock);
	brmctx->multicast_igmp_version = val;
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx,
				 unsigned long val)
{
	/* Currently we support version 1 and 2 */
	switch (val) {
	case 1:
	case 2:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&brmctx->br->multicast_lock);
	brmctx->multicast_mld_version = val;
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}
#endif

/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev:	The bridge port adjacent to which to retrieve addresses
 * @br_ip_list:	The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				goto unlock;

			entry->addr = group->key.addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
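
/*
 * Usage sketch (hypothetical caller, not part of this file): per the notes
 * above, the caller initializes and owns br_ip_list, must tolerate
 * duplicate entries, and must free each entry. "port_dev" is an
 * illustrative bridge port netdev and use_addr() stands in for whatever
 * the caller does with each snooped address:
 *
 *	LIST_HEAD(mc_list);
 *	struct br_ip_list *entry, *tmp;
 *	int n;
 *
 *	n = br_multicast_list_adjacent(port_dev, &mc_list);
 *	list_for_each_entry_safe(entry, tmp, &mc_list, list) {
 *		use_addr(&entry->addr);
 *		list_del(&entry->list);
 *		kfree(entry);
 *	}
 */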

/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists anywhere on the bridged link layer.
 * Otherwise returns false.
 */
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct ethhdr eth;
	bool ret = false;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	memset(&eth, 0, sizeof(eth));
	eth.h_proto = htons(proto);

	ret = br_multicast_querier_exists(&br->multicast_ctx, &eth, NULL);

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
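
/*
 * Usage sketch (hypothetical): a caller such as a Wi-Fi or switch driver
 * could use this to decide whether snooped membership state on the link is
 * trustworthy before pruning unregistered multicast. "port_dev" is an
 * illustrative bridge port netdev and both helpers are illustrative only:
 *
 *	if (br_multicast_has_querier_anywhere(port_dev, ETH_P_IP))
 *		rely_on_snooping();
 *	else
 *		flood_multicast();
 */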

/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast *brmctx;
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;
	brmctx = &br->multicast_ctx;

	switch (proto) {
	case ETH_P_IP:
		if (!timer_pending(&brmctx->ip4_other_query.timer) ||
		    rcu_dereference(brmctx->ip4_querier.port) == port)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		if (!timer_pending(&brmctx->ip6_other_query.timer) ||
		    rcu_dereference(brmctx->ip6_querier.port) == port)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
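
/*
 * Usage sketch (hypothetical): in contrast to the "anywhere" variant above,
 * this only reports a querier behind one of the *other* ports, so a caller
 * on "port_dev" (illustrative) can tell whether reports sent through its own
 * port have an external querier listening:
 *
 *	bool v4 = br_multicast_has_querier_adjacent(port_dev, ETH_P_IP);
 *	bool v6 = br_multicast_has_querier_adjacent(port_dev, ETH_P_IPV6);
 */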

/**
 * br_multicast_has_router_adjacent - Checks for a router behind a bridge port
 * @dev: The bridge port adjacent to which to check for a multicast router
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a multicast router is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_router_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	port = br_port_get_check_rcu(dev);
	if (!port)
		goto unlock;

	brmctx = &port->br->multicast_ctx;
	switch (proto) {
	case ETH_P_IP:
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
					 ip4_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
					 ip6_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#endif
	default:
		/* when compiled without IPv6 support, be conservative and
		 * always assume presence of an IPv6 multicast router
		 */
		ret = true;
	}

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent);
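
/*
 * Usage sketch (hypothetical): note the conservative fallback documented in
 * the default case above; when the kernel is built without IPv6 support, an
 * adjacent IPv6 multicast router is always assumed. "port_dev" and the
 * helper are illustrative only:
 *
 *	if (br_multicast_has_router_adjacent(port_dev, ETH_P_IPV6))
 *		mark_uplink_as_mrouter();
 */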

static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}

void br_multicast_count(struct net_bridge *br,
			const struct net_bridge_port *p,
			const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	/* if multicast_disabled is true then igmp type can't be set */
	if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, skb, type, dir);
}

int br_multicast_init_stats(struct net_bridge *br)
{
	br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!br->mcast_stats)
		return -ENOMEM;

	return 0;
}

void br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}

/* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}

void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}

int br_mdb_hash_init(struct net_bridge *br)
{
	int err;

	err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
	if (err)
		return err;

	err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
	if (err) {
		rhashtable_destroy(&br->sg_port_tbl);
		return err;
	}

	return 0;
}

void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->sg_port_tbl);
	rhashtable_destroy(&br->mdb_hash_tbl);
}