br_multicast.c 60.1 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13
/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
14
#include <linux/export.h>
15 16 17 18
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
19
#include <linux/log2.h>
20 21 22 23 24 25 26
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
27
#include <linux/inetdevice.h>
28
#include <linux/mroute.h>
29
#include <net/ip.h>
30
#include <net/switchdev.h>
E
Eric Dumazet 已提交
31
#if IS_ENABLED(CONFIG_IPV6)
32 33
#include <net/ipv6.h>
#include <net/mld.h>
34
#include <net/ip6_checksum.h>
35
#include <net/addrconf.h>
36
#endif
37 38 39

#include "br_private.h"

40
static void br_multicast_start_querier(struct net_bridge *br,
41
				       struct bridge_mcast_own_query *query);
42 43
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port);
44 45 46
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
F
Felix Fietkau 已提交
47 48 49
					 __u16 vid,
					 const unsigned char *src);

50
static void __del_port_router(struct net_bridge_port *p);
51 52 53 54
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
F
Felix Fietkau 已提交
55
					 __u16 vid, const unsigned char *src);
56
#endif
C
Cong Wang 已提交
57
unsigned int br_mdb_rehash_seq;
58

59 60 61 62
static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
{
	if (a->proto != b->proto)
		return 0;
63 64
	if (a->vid != b->vid)
		return 0;
65 66 67
	switch (a->proto) {
	case htons(ETH_P_IP):
		return a->u.ip4 == b->u.ip4;
E
Eric Dumazet 已提交
68
#if IS_ENABLED(CONFIG_IPV6)
69 70 71
	case htons(ETH_P_IPV6):
		return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
#endif
72 73 74 75
	}
	return 0;
}

76 77
/* Hash an IPv4 group address + VLAN id into an mdb bucket index.
 * mdb->max is a power of two, so masking with (max - 1) is a cheap modulo;
 * mdb->secret salts the hash against chain-collision attacks.
 */
static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
				__u16 vid)
{
	return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
}

E
Eric Dumazet 已提交
82
#if IS_ENABLED(CONFIG_IPV6)
83
/* Hash an IPv6 group address + VLAN id into an mdb bucket index.
 * ipv6_addr_hash() folds the 128-bit address down to 32 bits first;
 * mdb->max is a power of two (see __br_ip4_hash).
 */
static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
				const struct in6_addr *ip,
				__u16 vid)
{
	return jhash_2words(ipv6_addr_hash(ip), vid,
			    mdb->secret) & (mdb->max - 1);
}
#endif

92 93 94 95 96
/* Protocol dispatcher for the per-family hash helpers above.
 * Unknown protocols fall through to bucket 0.
 */
static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
			     struct br_ip *ip)
{
	switch (ip->proto) {
	case htons(ETH_P_IP):
		return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
#endif
	}
	return 0;
}

/* Walk one hash chain looking for an exact group match.
 * Uses the RCU list variant, so it is safe from both the RCU read side
 * and under br->multicast_lock; hlist[mdb->ver] selects the live chain
 * of the two-version table (see br_mdb_rehash()).
 */
static struct net_bridge_mdb_entry *__br_mdb_ip_get(
	struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
	struct net_bridge_mdb_entry *mp;

	hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		if (br_ip_equal(&mp->addr, dst))
			return mp;
	}

	return NULL;
}

119 120
/* Look up a multicast group entry; tolerates a NULL table (no mdb has
 * been allocated yet), returning NULL in that case.
 */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
					   struct br_ip *dst)
{
	if (!mdb)
		return NULL;

	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
}

128
/* Convenience wrapper: build an IPv4 br_ip key on the stack and look
 * it up via br_mdb_ip_get().
 */
static struct net_bridge_mdb_entry *br_mdb_ip4_get(
	struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
{
	struct br_ip br_dst = {
		.u.ip4 = dst,
		.proto = htons(ETH_P_IP),
		.vid = vid,
	};

	return br_mdb_ip_get(mdb, &br_dst);
}

E
Eric Dumazet 已提交
140
#if IS_ENABLED(CONFIG_IPV6)
141
/* Convenience wrapper: build an IPv6 br_ip key on the stack and look
 * it up via br_mdb_ip_get().
 */
static struct net_bridge_mdb_entry *br_mdb_ip6_get(
	struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
	__u16 vid)
{
	struct br_ip br_dst = {
		.u.ip6 = *dst,
		.proto = htons(ETH_P_IPV6),
		.vid = vid,
	};

	return br_mdb_ip_get(mdb, &br_dst);
}
#endif

155
/* Forwarding-path lookup: map an incoming data skb to its mdb entry.
 * Called from the RCU-protected receive path (hence rcu_dereference of
 * br->mdb).  Returns NULL when snooping is disabled, when the skb was
 * already classified as IGMP/MLD control traffic, or when the protocol
 * is neither IPv4 nor IPv6.
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
	struct br_ip ip;

	if (br->multicast_disabled)
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.u.ip4 = ip_hdr(skb)->daddr;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.u.ip6 = ipv6_hdr(skb)->daddr;
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get(mdb, &ip);
}

/* RCU callback run after a rehash grace period: free the superseded
 * hash table that the new one still points to via ->old.
 */
static void br_mdb_free(struct rcu_head *head)
{
	struct net_bridge_mdb_htable *mdb =
		container_of(head, struct net_bridge_mdb_htable, rcu);
	struct net_bridge_mdb_htable *prev = mdb->old;

	mdb->old = NULL;
	kfree(prev->mhash);
	kfree(prev);
}

/* Re-link every entry from the old table into the new one (entries live
 * in both tables simultaneously via their two hlist nodes, one per table
 * version).  If elasticity is non-zero, additionally verify that no new
 * chain exceeds it; returns -EINVAL if one does, 0 otherwise.
 */
static int br_mdb_copy(struct net_bridge_mdb_htable *new,
		       struct net_bridge_mdb_htable *old,
		       int elasticity)
{
	struct net_bridge_mdb_entry *mp;
	int maxlen;
	int len;
	int i;

	for (i = 0; i < old->max; i++)
		hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
			hlist_add_head(&mp->hlist[new->ver],
				       &new->mhash[br_ip_hash(new, &mp->addr)]);

	if (!elasticity)
		return 0;

	/* measure the longest chain in the rebuilt table */
	maxlen = 0;
	for (i = 0; i < new->max; i++) {
		len = 0;
		hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
			len++;
		if (len > maxlen)
			maxlen = len;
	}

	return maxlen > elasticity ? -EINVAL : 0;
}

226
void br_multicast_free_pg(struct rcu_head *head)
227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251
{
	struct net_bridge_port_group *p =
		container_of(head, struct net_bridge_port_group, rcu);

	kfree(p);
}

/* RCU callback: release an mdb group entry after readers are done. */
static void br_multicast_free_group(struct rcu_head *head)
{
	struct net_bridge_mdb_entry *entry =
		container_of(head, struct net_bridge_mdb_entry, rcu);

	kfree(entry);
}

/* Membership timer for a whole group: when it fires the host side of the
 * group is gone.  If no port memberships remain either, unlink the entry
 * from the live hash chain and free it after a grace period.
 * timer_pending() guards against a concurrent re-arm racing with expiry.
 */
static void br_multicast_group_expired(unsigned long data)
{
	struct net_bridge_mdb_entry *mp = (void *)data;
	struct net_bridge *br = mp->br;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	/* the bridge device itself is no longer a member */
	mp->mglist = false;

	if (mp->ports)
		goto out;

	mdb = mlock_dereference(br->mdb, br);

	hlist_del_rcu(&mp->hlist[mdb->ver]);
	mdb->size--;

	call_rcu_bh(&mp->rcu, br_multicast_free_group);

out:
	spin_unlock(&br->multicast_lock);
}

/* Remove one port-group membership from its group entry.
 * Caller must hold br->multicast_lock.  Unlinks pg from the RCU port
 * list, stops its timer, notifies userspace, and schedules the free.
 * If the group now has neither ports nor a host member, the group timer
 * is fired immediately so br_multicast_group_expired() reaps the entry.
 */
static void br_multicast_del_pg(struct net_bridge *br,
				struct net_bridge_port_group *pg)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;

	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &pg->addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
			      p->flags);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);

		/* last reference gone: expire the group entry right away */
		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

		return;
	}

	/* pg was not on its own group's port list - should never happen */
	WARN_ON(1);
}

/* Per-port membership timer: the port stopped reporting interest in the
 * group, so drop its membership - unless the entry is permanent (static
 * mdb entry) or the timer was re-armed concurrently.
 */
static void br_multicast_port_group_expired(unsigned long data)
{
	struct net_bridge_port_group *pg = (void *)data;
	struct net_bridge *br = pg->port->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	br_multicast_del_pg(br, pg);

out:
	spin_unlock(&br->multicast_lock);
}

321
/* Replace (or initially create) the mdb hash table with one of size max.
 * Uses a two-version scheme: entries carry two hlist nodes, so they can
 * be linked into the new table while RCU readers still traverse the old
 * one; the old table is freed after a grace period via br_mdb_free().
 * A fresh hash secret is drawn when growing out of a chain-limit breach
 * (elasticity != 0).  Returns 0 or a -errno.
 */
static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
			 int elasticity)
{
	struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
	struct net_bridge_mdb_htable *mdb;
	int err;

	mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->max = max;
	mdb->old = old;

	mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
	if (!mdb->mhash) {
		kfree(mdb);
		return -ENOMEM;
	}

	mdb->size = old ? old->size : 0;
	/* flip the version bit so the copy uses the entries' other hlist node */
	mdb->ver = old ? old->ver ^ 1 : 0;

	if (!old || elasticity)
		get_random_bytes(&mdb->secret, sizeof(mdb->secret));
	else
		mdb->secret = old->secret;

	if (!old)
		goto out;

	err = br_mdb_copy(mdb, old, elasticity);
	if (err) {
		kfree(mdb->mhash);
		kfree(mdb);
		return err;
	}

	br_mdb_rehash_seq++;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	rcu_assign_pointer(*mdbp, mdb);

	return 0;
}

368
/* Build an IGMP membership query skb: Ethernet + IPv4 (with Router
 * Alert option, hence ihl = 6 and the extra 4 bytes) + IGMPv2 or v3
 * query header depending on br->multicast_igmp_version.
 * group == 0 produces a general query, otherwise a group-specific one
 * (which uses the shorter last-member response interval).
 * Returns the skb with the MAC header pulled, or NULL on allocation
 * failure.  *igmp_type is set for the stats counters.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    __be32 group,
						    u8 *igmp_type)
{
	struct igmpv3_query *ihv3;
	size_t igmp_hdr_size;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	struct iphdr *iph;

	igmp_hdr_size = sizeof(*ih);
	if (br->multicast_igmp_version == 3)
		igmp_hdr_size = sizeof(*ihv3);
	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
						 igmp_hdr_size + 4);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	/* 01:00:5e:00:00:01 - mapped MAC for 224.0.0.1 (all-hosts) */
	eth->h_dest[0] = 1;
	eth->h_dest[1] = 0;
	eth->h_dest[2] = 0x5e;
	eth->h_dest[3] = 0;
	eth->h_dest[4] = 0;
	eth->h_dest[5] = 1;
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = 6;	/* 20 byte header + 4 byte Router Alert option */
	iph->tos = 0xc0;
	iph->tot_len = htons(sizeof(*iph) + igmp_hdr_size + 4);
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br->multicast_query_use_ifaddr ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);	/* IP header (ihl = 6 words) incl. option */

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (br->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ih->code = (group ? br->multicast_last_member_interval :
				    br->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		ih->csum = ip_compute_csum((void *)ih, sizeof(*ih));
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? br->multicast_last_member_interval :
				      br->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = br->multicast_query_interval / HZ;
		ihv3->nsrcs = 0;
		ihv3->resv = 0;
		ihv3->suppress = 0;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		ihv3->csum = ip_compute_csum((void *)ihv3, sizeof(*ihv3));
		break;
	}

	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

E
Eric Dumazet 已提交
461
#if IS_ENABLED(CONFIG_IPV6)
462
/* Build an MLD query skb: Ethernet + IPv6 + 8-byte Hop-by-Hop header
 * (carrying the Router Alert option) + MLDv1 or MLDv2 query depending
 * on br->multicast_mld_version.  An unspecified grp yields a general
 * query (longer response interval), otherwise a group-specific one.
 * Also tracks whether a usable IPv6 source address exists on the bridge
 * (br->has_ipv6_addr).  Returns NULL on allocation or saddr failure.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    const struct in6_addr *grp,
						    u8 *igmp_type)
{
	struct mld2_query *mld2q;
	unsigned long interval;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	size_t mld_hdr_size;
	struct sk_buff *skb;
	struct ethhdr *eth;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (br->multicast_mld_version == 2)
		mld_hdr_size = sizeof(*mld2q);
	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
						 8 + mld_hdr_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);	/* version 6 */
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	/* destination ff02::1 (all-nodes, link-local scope) */
	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		kfree_skb(skb);
		br->has_ipv6_addr = 0;
		return NULL;
	}

	br->has_ipv6_addr = 1;
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	interval = ipv6_addr_any(grp) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (br->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *grp;
		mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						  sizeof(*mldq), IPPROTO_ICMPV6,
						  csum_partial(mldq,
							       sizeof(*mldq),
							       0));
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = 0;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = 0;
		mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *grp;
		mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						     sizeof(*mld2q),
						     IPPROTO_ICMPV6,
						     csum_partial(mld2q,
								  sizeof(*mld2q),
								  0));
		break;
	}
	skb_put(skb, mld_hdr_size);

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

575
/* Protocol dispatcher for query allocation; returns NULL for protocols
 * other than IPv4 (and IPv6 when enabled).
 */
static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
						struct br_ip *addr,
						u8 *igmp_type)
{
	if (addr->proto == htons(ETH_P_IP))
		return br_ip4_multicast_alloc_query(br, addr->u.ip4,
						    igmp_type);
#if IS_ENABLED(CONFIG_IPV6)
	if (addr->proto == htons(ETH_P_IPV6))
		return br_ip6_multicast_alloc_query(br, &addr->u.ip6,
						    igmp_type);
#endif
	return NULL;
}

591
/* Look up a group under br->multicast_lock, enforcing the table's growth
 * policy on the way: if the chain walked is longer than hash_elasticity,
 * or the table is full, trigger a rehash (possibly doubling the size).
 * Return values:
 *   entry      - group already exists
 *   NULL       - group not found, table ok, caller may insert
 *   ERR_PTR(-EAGAIN) - table was rehashed, caller must re-hash and retry
 *   ERR_PTR(-E2BIG/-EEXIST/...) - failure; snooping may get disabled
 */
static struct net_bridge_mdb_entry *br_multicast_get_group(
	struct net_bridge *br, struct net_bridge_port *port,
	struct br_ip *group, int hash)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	unsigned int count = 0;
	unsigned int max;
	int elasticity;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		count++;
		if (unlikely(br_ip_equal(group, &mp->addr)))
			return mp;
	}

	elasticity = 0;
	max = mdb->max;

	if (unlikely(count > br->hash_elasticity && count)) {
		if (net_ratelimit())
			br_info(br, "Multicast hash table "
				"chain limit reached: %s\n",
				port ? port->dev->name : br->dev->name);

		/* re-hash with a new secret to break up the long chain */
		elasticity = br->hash_elasticity;
	}

	if (mdb->size >= max) {
		max *= 2;
		if (unlikely(max > br->hash_max)) {
			br_warn(br, "Multicast hash table maximum of %d "
				"reached, disabling snooping: %s\n",
				br->hash_max,
				port ? port->dev->name : br->dev->name);
			err = -E2BIG;
disable:
			br->multicast_disabled = 1;
			goto err;
		}
	}

	if (max > mdb->max || elasticity) {
		/* a previous rehash is still waiting for its grace period */
		if (mdb->old) {
			if (net_ratelimit())
				br_info(br, "Multicast hash table "
					"on fire: %s\n",
					port ? port->dev->name : br->dev->name);
			err = -EEXIST;
			goto err;
		}

		err = br_mdb_rehash(&br->mdb, max, elasticity);
		if (err) {
			br_warn(br, "Cannot rehash multicast "
				"hash table, disabling snooping: %s, %d, %d\n",
				port ? port->dev->name : br->dev->name,
				mdb->size, err);
			goto disable;
		}

		err = -EAGAIN;
		goto err;
	}

	return NULL;

err:
	mp = ERR_PTR(err);
	return mp;
}

665
/* Find or create the mdb entry for a group.  Allocates the hash table on
 * first use, retries the lookup after a -EAGAIN rehash from
 * br_multicast_get_group(), and otherwise inserts a fresh zeroed entry.
 * Caller must hold br->multicast_lock.  Returns the entry or an ERR_PTR.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct net_bridge_port *p,
						    struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	int hash;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	if (!mdb) {
		err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
		if (err)
			return ERR_PTR(err);
		goto rehash;
	}

	hash = br_ip_hash(mdb, group);
	mp = br_multicast_get_group(br, p, group, hash);
	switch (PTR_ERR(mp)) {
	case 0:
		/* not found (NULL) - fall through to allocation */
		break;

	case -EAGAIN:
rehash:
		/* table was replaced; recompute the bucket */
		mdb = rcu_dereference_protected(br->mdb, 1);
		hash = br_ip_hash(mdb, group);
		break;

	default:
		/* found entry or hard error - return it as-is */
		goto out;
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	setup_timer(&mp->timer, br_multicast_group_expired,
		    (unsigned long)mp);

	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
	mdb->size++;

out:
	return mp;
}

714 715 716
/* Allocate and initialize a port-group membership, linked in front of
 * *next on the group's RCU port list and onto the port's mglist.
 * src is the reporting host's MAC (for multicast-to-unicast); when NULL
 * the eth_addr is set to all-ones, i.e. "deliver as multicast".
 * Caller must hold br->multicast_lock.  Returns NULL on OOM.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->addr = *group;
	p->port = port;
	p->flags = flags;
	rcu_assign_pointer(p->next, next);
	hlist_add_head(&p->mglist, &port->mglist);
	setup_timer(&p->timer, br_multicast_port_group_expired,
		    (unsigned long)p);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		memset(p->eth_addr, 0xff, ETH_ALEN);

	return p;
}

F
Felix Fietkau 已提交
743 744 745 746 747 748 749 750 751 752 753 754 755
/* Does port-group p represent membership of (port, src)?  The reporter's
 * MAC only matters when the port runs multicast-to-unicast, where each
 * listener host gets its own entry.
 */
static bool br_port_group_equal(struct net_bridge_port_group *p,
				struct net_bridge_port *port,
				const unsigned char *src)
{
	return p->port == port &&
	       (!(port->flags & BR_MULTICAST_TO_UNICAST) ||
		ether_addr_equal(src, p->eth_addr));
}

756
/* Record a membership report for group on the given port (or on the
 * bridge device itself when port is NULL), creating the group and/or
 * port-group entries as needed and (re)arming the membership timer.
 * src is the reporting host's source MAC.  Returns 0 or a -errno.
 */
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group,
				  const unsigned char *src)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;
	int err;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, port, group);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto err;

	if (!port) {
		/* host join: the bridge device itself is a member */
		mp->mglist = true;
		mod_timer(&mp->timer, now + br->multicast_membership_interval);
		goto out;
	}

	/* port list is kept sorted by descending port pointer */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, port, src))
			goto found;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, 0, src);
	if (unlikely(!p))
		goto err;
	rcu_assign_pointer(*pp, p);
	br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);

found:
	mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}

808 809
static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
810
				      __be32 group,
F
Felix Fietkau 已提交
811 812
				      __u16 vid,
				      const unsigned char *src)
813 814 815 816 817 818 819 820
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return 0;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
821
	br_group.vid = vid;
822

F
Felix Fietkau 已提交
823
	return br_multicast_add_group(br, port, &br_group, src);
824 825
}

E
Eric Dumazet 已提交
826
#if IS_ENABLED(CONFIG_IPV6)
827 828
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
829
				      const struct in6_addr *group,
F
Felix Fietkau 已提交
830 831
				      __u16 vid,
				      const unsigned char *src)
832 833 834
{
	struct br_ip br_group;

835
	if (ipv6_addr_is_ll_all_nodes(group))
836 837
		return 0;

A
Alexey Dobriyan 已提交
838
	br_group.u.ip6 = *group;
839
	br_group.proto = htons(ETH_P_IPV6);
840
	br_group.vid = vid;
841

F
Felix Fietkau 已提交
842
	return br_multicast_add_group(br, port, &br_group, src);
843 844 845
}
#endif

846 847 848 849 850 851
/* Router-presence timer for a port: no queries seen for a while, so the
 * port is no longer considered a multicast-router port - unless its
 * router mode is statically configured (disabled/permanent) or the
 * timer was re-armed concurrently.
 */
static void br_multicast_router_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&port->multicast_router_timer))
		goto out;

	__del_port_router(port);
out:
	spin_unlock(&br->multicast_lock);
}

/* Intentionally empty: the bridge-local router timer needs no action on
 * expiry; only its pending/expired state is consulted elsewhere.
 */
static void br_multicast_local_router_expired(unsigned long data)
{
}

866
/* The foreign querier we were deferring to went silent: take over and
 * start sending our own queries (if the bridge is still up and snooping
 * is enabled).
 */
static void br_multicast_querier_expired(struct net_bridge *br,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || br->multicast_disabled)
		goto out;

	br_multicast_start_querier(br, query);

out:
	spin_unlock(&br->multicast_lock);
}

879 880 881 882
/* Timer callback: IPv4 other-querier timeout. */
static void br_ip4_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_querier_expired(br, &br->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Timer callback: IPv6 other-querier timeout. */
static void br_ip6_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif

895 896 897 898 899 900 901 902 903 904 905 906
static void br_multicast_select_own_querier(struct net_bridge *br,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
#endif
}

907 908 909
/* Allocate and emit one query.  With a port, transmit out of that port
 * through the bridge netfilter LOCAL_OUT hook; without one, record us
 * as the querier and loop the packet into our own receive path so all
 * ports see it.  Per-direction multicast stats are updated either way.
 */
static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct br_ip *ip)
{
	struct sk_buff *skb;
	u8 igmp_type;

	skb = br_multicast_alloc_query(br, ip, &igmp_type);
	if (!skb)
		return;

	if (port) {
		skb->dev = port->dev;
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);
	} else {
		br_multicast_select_own_querier(br, ip, skb);
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}

/* Send a general query for the address family that own_query belongs to,
 * then re-arm own_query's timer (startup interval for the first few
 * queries, steady-state interval after).  Suppressed entirely when the
 * querier role is off or another querier on the segment is still fresh
 * (other_query timer pending).
 * NB: without CONFIG_IPV6 the #if leaves non-IPv4 queries with
 * other_query == NULL, caught by the check below.
 */
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct br_ip br_group;
	unsigned long time;

	if (!netif_running(br->dev) || br->multicast_disabled ||
	    !br->multicast_querier)
		return;

	memset(&br_group.u, 0, sizeof(br_group.u));

	if (port ? (own_query == &port->ip4_own_query) :
		   (own_query == &br->ip4_own_query)) {
		other_query = &br->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &br->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, port, &br_group);

	time = jiffies;
	time += own_query->startup_sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}

970 971 972
/* Own-query timer for a port: send the next periodic query unless the
 * port cannot forward (disabled/blocking).  Counts startup queries so
 * br_multicast_send_query() can pick the right interval.
 */
static void
br_multicast_port_query_expired(struct net_bridge_port *port,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(port->br, port, query);

out:
	spin_unlock(&br->multicast_lock);
}

990 991 992 993
/* Timer callback: per-port IPv4 own-query interval elapsed. */
static void br_ip4_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;

	br_multicast_port_query_expired(port, &port->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Timer callback: per-port IPv6 own-query interval elapsed. */
static void br_ip6_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;

	br_multicast_port_query_expired(port, &port->ip6_own_query);
}
#endif

1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017
/* Propagate the multicast-disabled flag to offloading switch hardware
 * via switchdev (deferred, so callable under spinlocks).
 */
static void br_mc_disabled_update(struct net_device *dev, bool value)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
		.flags = SWITCHDEV_F_DEFER,
		.u.mc_disabled = value,
	};

	switchdev_port_attr_set(dev, &attr);
}

1018
/* Initialize multicast state for a port being added to a bridge:
 * default router mode, the router and own-query timers, hardware flag
 * sync, and the per-cpu stats.  Returns 0 or -ENOMEM.
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
		    (unsigned long)port);
	setup_timer(&port->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, (unsigned long)port);
#if IS_ENABLED(CONFIG_IPV6)
	setup_timer(&port->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, (unsigned long)port);
#endif
	br_mc_disabled_update(port->dev, port->br->multicast_disabled);

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}

/* Tear down multicast state when a port leaves the bridge: drop all of
 * its remaining group memberships, stop the router timer, and free the
 * per-cpu stats.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_del_pg(br, pg);
	spin_unlock_bh(&br->multicast_lock);

	del_timer_sync(&port->multicast_router_timer);
	free_percpu(port->mcast_stats);
}

1054
/* Restart an own-query cycle from the startup phase.  The timer is only
 * re-armed if we could stop it first (try_to_del_timer_sync/del_timer),
 * avoiding a race with a callback that is already running.
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}

1063
/* Kick off querying on a port that became operational, and re-register
 * it as a router port if it is statically configured as one.
 * Caller must hold br->multicast_lock.
 */
static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	if (br->multicast_disabled || !netif_running(br->dev))
		return;

	br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&port->ip6_own_query);
#endif
	if (port->multicast_router == MDB_RTR_TYPE_PERM &&
	    hlist_unhashed(&port->rlist))
		br_multicast_add_router(br, port);
}
1078

1079 1080 1081 1082 1083 1084
/* Locked wrapper around __br_multicast_enable_port(). */
void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	__br_multicast_enable_port(port);
	spin_unlock(&br->multicast_lock);
}

/* A port went down: drop its learned (non-permanent) group memberships,
 * remove it from the router port list, and stop all its timers.
 */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(br, pg);

	__del_port_router(port);

	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}

1109 1110
/* Parse an IGMPv3 membership report and fold each group record into the
 * bridge's MDB as if it were an IGMPv2 join/leave.
 *
 * Returns 0 on success or -EINVAL if the packet is truncated.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb,
					 u16 vid)
{
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		/* Recompute grec from skb->data after every pull: the
		 * linear buffer may have been reallocated.
		 */
		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;

		len += ntohs(grec->grec_nsrcs) * 4;
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now. */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		/* INCLUDE with no sources means "leave" in v3 semantics. */
		if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE) &&
		    ntohs(grec->grec_nsrcs) == 0) {
			br_ip4_multicast_leave_group(br, port, group, vid, src);
		} else {
			err = br_ip4_multicast_add_group(br, port, group, vid,
							 src);
			if (err)
				break;
		}
	}

	return err;
}

E
Eric Dumazet 已提交
1171
#if IS_ENABLED(CONFIG_IPV6)
/* Parse an MLDv2 report and fold each group record into the bridge's
 * MDB as if it were an MLDv1 join/leave.
 *
 * Returns 0 on success or -EINVAL if the packet is truncated.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb,
					u16 vid)
{
	const unsigned char *src;
	struct icmp6hdr *icmp6h;
	struct mld2_grec *grec;
	int i;
	int len;
	int num;
	int err = 0;

	if (!pskb_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = skb_transport_offset(skb) + sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *nsrcs, _nsrcs;
		u16 nsrcs_cnt;

		nsrcs = skb_header_pointer(skb,
					   len + offsetof(struct mld2_grec,
							  grec_nsrcs),
					   sizeof(_nsrcs), &_nsrcs);
		if (!nsrcs)
			return -EINVAL;

		/* Copy the source count now: the pskb_may_pull() below may
		 * reallocate skb->data, which would leave nsrcs dangling if
		 * it pointed into the old linear buffer rather than _nsrcs.
		 */
		nsrcs_cnt = ntohs(*nsrcs);

		if (!pskb_may_pull(skb,
				   len + sizeof(*grec) +
				   sizeof(struct in6_addr) * nsrcs_cnt))
			return -EINVAL;

		/* Recompute grec from skb->data after the pull. */
		grec = (struct mld2_grec *)(skb->data + len);
		len += sizeof(*grec) +
		       sizeof(struct in6_addr) * nsrcs_cnt;

		/* We treat these as MLDv1 reports for now. */
		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		/* INCLUDE with no sources means "leave" in v2 semantics. */
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs_cnt == 0) {
			br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
						     vid, src);
		} else {
			err = br_ip6_multicast_add_group(br, port,
							 &grec->grec_mca, vid,
							 src);
			if (err)
				break;
		}
	}

	return err;
}
#endif

1244
/* IGMP querier election: adopt the new querier if no election is in
 * progress, if we have no recorded querier yet, or if the new source
 * address is lower (RFC 2236 lowest-IP-wins rule).
 *
 * Returns true if the sender became the selected querier.
 */
static bool br_ip4_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    __be32 saddr)
{
	if (!timer_pending(&br->ip4_own_query.timer) &&
	    !timer_pending(&br->ip4_other_query.timer))
		goto update;

	if (!br->ip4_querier.addr.u.ip4)
		goto update;

	if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
		goto update;

	return false;

update:
	br->ip4_querier.addr.u.ip4 = saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip4_querier.port, port);

	return true;
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD querier election, IPv6 counterpart of the IGMP version above:
 * lowest source address wins (RFC 2710).
 */
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	if (!timer_pending(&br->ip6_own_query.timer) &&
	    !timer_pending(&br->ip6_other_query.timer))
		goto update;

	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
		goto update;

	return false;

update:
	br->ip6_querier.addr.u.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
#endif

/* Dispatch querier election by address family. */
static bool br_multicast_select_querier(struct net_bridge *br,
					struct net_bridge_port *port,
					struct br_ip *saddr)
{
	switch (saddr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
#endif
	}

	return false;
}

1309
/* Refresh the "another querier present" timer; record the delay window
 * only when the timer was not already running.
 */
static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}

1320
/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	/* Already on the router list - nothing to do. */
	if (!hlist_unhashed(&port->rlist))
		return;

	/* Find the insertion point that keeps the list ordered by
	 * descending pointer value.
	 */
	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_behind_rcu(&port->rlist, slot);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
	br_rtr_notify(br->dev, port, RTM_NEWMDB);
}

1347 1348 1349 1350 1351 1352
/* Note that a multicast router was seen behind @port (or locally when
 * @port is NULL) and refresh the corresponding router timer, unless the
 * router type is statically configured.
 */
static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY)
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		return;
	}

	/* DISABLED never becomes a router port; PERM needs no timer. */
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(br, port);

	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}

/* Handle a query from another device: run querier election and, if the
 * sender wins, refresh the other-querier timer and mark it as a router.
 */
static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (!br_multicast_select_querier(br, port, saddr))
		return;

	br_multicast_update_query_timer(br, query, max_delay);
	br_multicast_mark_router(br, port);
}

1382 1383
/* Process an IGMP query (v1/v2/v3). A general query feeds querier
 * election; a group-specific query lowers the membership timers of the
 * matching MDB entry and its ports so that silent members expire.
 */
static int br_ip4_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	__be32 group;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (skb->len == offset + sizeof(*ih)) {
		/* IGMPv1/v2 query; v1 has code == 0. */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (skb->len >= offset + sizeof(*ih3)) {
		/* IGMPv3 query; source-specific queries are not handled. */
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs)
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		/* General query: run querier election. */
		saddr.proto = htons(ETH_P_IP);
		saddr.u.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	/* Shorten the group timer, but never extend an earlier expiry. */
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}

E
Eric Dumazet 已提交
1460
#if IS_ENABLED(CONFIG_IPV6)
/* Process an MLD query (v1/v2), the IPv6 counterpart of
 * br_ip4_multicast_query(): general queries feed querier election,
 * group-specific queries lower the matching membership timers.
 */
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	if (skb->len == offset + sizeof(*mld)) {
		/* MLDv1 query. */
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query; source-specific queries are not handled. */
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.u.ip6 = ip6h->saddr;

		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	/* Shorten the group timer, but never extend an earlier expiry. */
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif

1545 1546 1547 1548 1549
/* Common leave handling for IGMP/MLD. With fast-leave, the port's
 * group entry is removed immediately; otherwise membership timers are
 * lowered (and, when we are querier, a last-member query is sent) so
 * the entry expires unless another member reports.
 */
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		/* Fast leave: unlink the matching port group right away. */
		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, port, src))
				continue;

			rcu_assign_pointer(*pp, p->next);
			hlist_del_init(&p->mglist);
			del_timer(&p->timer);
			call_rcu_bh(&p->rcu, br_multicast_free_pg);
			br_mdb_notify(br->dev, port, group, RTM_DELMDB,
				      p->flags);

			/* Last member gone - let the group entry expire. */
			if (!mp->ports && !mp->mglist &&
			    netif_running(br->dev))
				mod_timer(&mp->timer, jiffies);
		}
		goto out;
	}

	/* Another querier is active; it will handle the leave. */
	if (timer_pending(&other_query->timer))
		goto out;

	if (br->multicast_querier) {
		__br_multicast_send_query(br, port, &mp->addr);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		/* Host leave: lower the bridge's own membership timer. */
		if (mp->mglist &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}

1654 1655
/* IPv4 leave: wrap the group address into a br_ip and delegate to the
 * generic leave handler. Link-local (224.0.0.x) groups are ignored.
 */
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = port ? &port->ip4_own_query : &br->ip4_own_query;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
				 own_query, src);
}

E
Eric Dumazet 已提交
1676
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 leave: wrap the group address into a br_ip and delegate to the
 * generic leave handler. The all-nodes group is ignored.
 */
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = port ? &port->ip6_own_query : &br->ip6_own_query;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
				 own_query, src);
}
#endif
1699

1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732
/* Bump the per-CPU IGMP/MLD parse-error counter for @p (or the bridge
 * itself when @p is NULL), if statistics are enabled.
 */
static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br->multicast_stats_enabled)
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747
/* A PIMv2 Hello on this port indicates a multicast router behind it. */
static void br_multicast_pim(struct net_bridge *br,
			     struct net_bridge_port *port,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	br_multicast_mark_router(br, port);
}

1748 1749
/* Validate and dispatch an IPv4 multicast packet: IGMP messages go to
 * the matching report/query/leave handler; non-IGMP multicast is marked
 * router-only unless it is link-local, and PIM hellos mark the port as
 * a router port.
 */
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		/* Valid IP multicast, but not IGMP. */
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(br, port, skb);
		}
		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid, src);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		err = br_ip4_multicast_query(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
		break;
	}

	/* ip_mc_check_igmp() may have handed us a trimmed clone. */
	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}

E
Eric Dumazet 已提交
1803
#if IS_ENABLED(CONFIG_IPV6)
/* Validate and dispatch an IPv6 multicast packet: MLD messages go to
 * the matching report/query/reduction handler; other IPv6 multicast is
 * marked router-only unless addressed to all-nodes.
 */
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		/* Valid IPv6 multicast, but not MLD. */
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
						 src);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
		break;
	}

	/* ipv6_mc_check_mld() may have handed us a trimmed clone. */
	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif

1857
/* Entry point for multicast snooping on received frames; dispatches by
 * ethertype. Returns 0 when snooping is disabled or the protocol is
 * not handled.
 */
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (br->multicast_disabled)
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(br, port, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(br, port, skb, vid);
		break;
#endif
	}

	return ret;
}

1882
/* Own-query timer expiry: clear the recorded querier port and send the
 * next (startup or periodic) query.
 */
static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}

/* Timer callback for the bridge's own IGMP query timer. */
static void br_ip4_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}
1901

1902 1903 1904 1905
#if IS_ENABLED(CONFIG_IPV6)
/* Timer callback for the bridge's own MLD query timer. */
static void br_ip6_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif
1910 1911 1912 1913 1914 1915

/* Initialize the bridge's multicast snooping state: protocol defaults
 * (RFC 2236/2710 timing values), querier state and all timers.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_elasticity = 4;
	br->hash_max = 512;

	br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	br->multicast_querier = 0;
	br->multicast_query_use_ifaddr = 0;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
	br->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	br->multicast_mld_version = 1;
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif
	br->has_ipv6_addr = 1;

	spin_lock_init(&br->multicast_lock);
	setup_timer(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	setup_timer(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->ip4_own_query.timer, br_ip4_multicast_query_expired,
		    (unsigned long)br);
#if IS_ENABLED(CONFIG_IPV6)
	setup_timer(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->ip6_own_query.timer, br_ip6_multicast_query_expired,
		    (unsigned long)br);
#endif
}

1954
/* Restart one own-query cycle; no-op while snooping is disabled. */
static void __br_multicast_open(struct net_bridge *br,
				struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (br->multicast_disabled)
		return;

	mod_timer(&query->timer, jiffies);
}

/* Bridge device brought up: restart IGMP (and MLD) query cycles. */
void br_multicast_open(struct net_bridge *br)
{
	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}

/* Bridge device going down: synchronously stop all bridge-level
 * multicast timers.
 */
void br_multicast_stop(struct net_bridge *br)
{
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif
}

/* Bridge device being destroyed: tear down the whole MDB hash table
 * (RCU-deferred frees) and release the statistics area.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *n;
	u32 ver;
	int i;

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	if (!mdb)
		goto out;

	br->mdb = NULL;

	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	/* Wait for an in-flight rehash's old table to be freed before
	 * freeing the current one.
	 */
	if (mdb->old) {
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);

	free_percpu(br->mcast_stats);
}
2023 2024 2025

/* Set the bridge-level multicast router mode from user space.
 * Returns 0 on success or -EINVAL for an unknown mode.
 */
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		/* Static modes do not use the router timer. */
		del_timer(&br->multicast_router_timer);
		/* fall through */
	case MDB_RTR_TYPE_TEMP_QUERY:
		br->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}

2046 2047 2048 2049 2050 2051
/* Unlink a port from the router list and notify user space. Caller
 * must hold br->multicast_lock.
 */
static void __del_port_router(struct net_bridge_port *p)
{
	if (hlist_unhashed(&p->rlist))
		return;
	hlist_del_init_rcu(&p->rlist);
	br_rtr_notify(p->br->dev, p, RTM_DELMDB);

	/* don't allow timer refresh */
	if (p->multicast_router == MDB_RTR_TYPE_TEMP)
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}

2058 2059 2060
/* Set a port's multicast router mode from user space, updating the
 * router list and timer accordingly. Returns 0 on success or -EINVAL
 * for an unknown mode.
 */
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	unsigned long now = jiffies;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);
	if (p->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (p->multicast_router == MDB_RTR_TYPE_TEMP)
			mod_timer(&p->multicast_router_timer,
				  now + br->multicast_querier_interval);
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		p->multicast_router = MDB_RTR_TYPE_DISABLED;
		__del_port_router(p);
		del_timer(&p->multicast_router_timer);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		__del_port_router(p);
		break;
	case MDB_RTR_TYPE_PERM:
		p->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&p->multicast_router_timer);
		br_multicast_add_router(br, p);
		break;
	case MDB_RTR_TYPE_TEMP:
		p->multicast_router = MDB_RTR_TYPE_TEMP;
		br_multicast_mark_router(br, p);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}
2101

2102
/* Restart the bridge's own query cycle and the matching per-port query
 * timers on all forwarding ports.
 */
static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open(br, query);

	list_for_each_entry(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		if (query == &br->ip4_own_query)
			br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->ip6_own_query);
#endif
	}
}

/* Enable or disable multicast snooping from user space. Enabling
 * rehashes the MDB (to apply any pending size change) and restarts the
 * query machinery on every port.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_port *port;
	int err = 0;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_disabled == !val)
		goto unlock;

	br_mc_disabled_update(br->dev, !val);
	br->multicast_disabled = !val;
	if (br->multicast_disabled)
		goto unlock;

	if (!netif_running(br->dev))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb) {
		/* A previous rehash hasn't finished yet - back out. */
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->multicast_disabled = !!val;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, mdb->max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port(port);

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}
2165

2166 2167
/* Enable or disable the bridge acting as IGMP/MLD querier. Enabling
 * arms the other-querier delay windows and starts our own query cycles.
 * Always returns 0.
 */
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_querier == val)
		goto unlock;

	br->multicast_querier = val;
	if (!val)
		goto unlock;

	max_delay = br->multicast_query_response_interval;

	if (!timer_pending(&br->ip4_other_query.timer))
		br->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&br->ip6_other_query.timer))
		br->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
2202
	int err = -EINVAL;
2203
	u32 old;
2204
	struct net_bridge_mdb_htable *mdb;
2205

2206
	spin_lock_bh(&br->multicast_lock);
2207 2208
	if (!is_power_of_2(val))
		goto unlock;
2209 2210 2211

	mdb = mlock_dereference(br->mdb, br);
	if (mdb && val < mdb->size)
2212 2213 2214 2215 2216 2217 2218
		goto unlock;

	err = 0;

	old = br->hash_max;
	br->hash_max = val;

2219 2220
	if (mdb) {
		if (mdb->old) {
2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233
			err = -EEXIST;
rollback:
			br->hash_max = old;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, br->hash_max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

unlock:
2234
	spin_unlock_bh(&br->multicast_lock);
2235 2236 2237

	return err;
}
/* Select the IGMP version the bridge speaks.
 *
 * Only IGMPv2 and IGMPv3 are supported; anything else yields -EINVAL.
 */
int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
{
	if (val != 2 && val != 3)
		return -EINVAL;

	spin_lock_bh(&br->multicast_lock);
	br->multicast_igmp_version = val;
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Select the MLD version the bridge speaks.
 *
 * Only MLDv1 and MLDv2 are supported; anything else yields -EINVAL.
 */
int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
{
	if (val != 1 && val != 2)
		return -EINVAL;

	spin_lock_bh(&br->multicast_lock);
	br->multicast_mld_version = val;
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
#endif

/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev:	The bridge port adjacent to which to retrieve addresses
 * @br_ip_list:	The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				goto unlock;

			entry->addr = group->addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists anywhere on the bridged link layer.
 * Otherwise returns false.
 */
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct ethhdr eth;
	bool ret = false;

	rcu_read_lock();
	if (!br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	memset(&eth, 0, sizeof(eth));
	eth.h_proto = htons(proto);

	ret = br_multicast_querier_exists(br, &eth);

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);

/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_port *port;
	struct net_bridge *br;
	bool found = false;

	rcu_read_lock();
	if (!br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	if (proto == ETH_P_IP) {
		/* no elected querier, or the querier sits on @dev itself */
		if (!timer_pending(&br->ip4_other_query.timer) ||
		    rcu_dereference(br->ip4_querier.port) == port)
			goto unlock;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (proto == ETH_P_IPV6) {
		if (!timer_pending(&br->ip6_other_query.timer) ||
		    rcu_dereference(br->ip6_querier.port) == port)
			goto unlock;
#endif
	} else {
		/* unsupported protocol family */
		goto unlock;
	}

	found = true;
unlock:
	rcu_read_unlock();
	return found;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);

/* Account one IGMP/MLD packet of @type travelling in direction @dir into
 * the per-cpu counters @stats.  @skb is only inspected, never modified.
 */
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *cstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&cstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		/* transport payload length = total length - IP header */
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			cstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			cstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			cstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			if (t_len == sizeof(struct igmphdr)) {
				unsigned int off = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, off,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				/* a zero max-resp code marks an IGMPv1 query */
				if (ih->code)
					cstats->mstats.igmp_v2queries[dir]++;
				else
					cstats->mstats.igmp_v1queries[dir]++;
			} else {
				/* longer than a plain igmphdr -> IGMPv3 */
				cstats->mstats.igmp_v3queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			cstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* payload length incl. extension headers, minus them again */
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			cstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			cstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			/* an MLDv1 query is exactly one mld_msg long */
			if (t_len == sizeof(struct mld_msg))
				cstats->mstats.mld_v1queries[dir]++;
			else
				cstats->mstats.mld_v2queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			cstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&cstats->syncp);
}

/* Entry point for multicast statistics accounting: pick the per-port or
 * per-bridge counter set and hand off to br_mcast_stats_add().
 */
void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
			const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	/* if multicast_disabled is true then igmp type can't be set */
	if (!type || !br->multicast_stats_enabled)
		return;

	stats = p ? p->mcast_stats : br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, skb, type, dir);
}

/* Allocate the bridge-wide per-cpu multicast statistics.
 * Returns 0 on success or -ENOMEM when the allocation fails.
 */
int br_multicast_init_stats(struct net_bridge *br)
{
	br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);

	return br->mcast_stats ? 0 : -ENOMEM;
}

/* Accumulate a per-direction (RX/TX) counter pair from @src into @dst. */
static void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
}

void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

2552 2553 2554
		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
2555 2556 2557 2558 2559 2560
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

2561 2562
		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
2563 2564 2565 2566 2567 2568 2569
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}