/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query);
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port);
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid);
#endif
unsigned int br_mdb_rehash_seq;

static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
{
	if (a->proto != b->proto)
		return 0;
	if (a->vid != b->vid)
		return 0;
	switch (a->proto) {
	case htons(ETH_P_IP):
		return a->u.ip4 == b->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
#endif
	}
	return 0;
}

static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
				__u16 vid)
{
	return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
}

#if IS_ENABLED(CONFIG_IPV6)
static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
				const struct in6_addr *ip,
				__u16 vid)
{
	return jhash_2words(ipv6_addr_hash(ip), vid,
			    mdb->secret) & (mdb->max - 1);
}
#endif

static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
			     struct br_ip *ip)
{
	switch (ip->proto) {
	case htons(ETH_P_IP):
		return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
#endif
	}
	return 0;
}

static struct net_bridge_mdb_entry *__br_mdb_ip_get(
	struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
	struct net_bridge_mdb_entry *mp;

	hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		if (br_ip_equal(&mp->addr, dst))
			return mp;
	}

	return NULL;
}

struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
					   struct br_ip *dst)
{
	if (!mdb)
		return NULL;

	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
}

static struct net_bridge_mdb_entry *br_mdb_ip4_get(
	struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
static struct net_bridge_mdb_entry *br_mdb_ip6_get(
	struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
	__u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}
#endif

struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
	struct br_ip ip;

	if (br->multicast_disabled)
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.u.ip4 = ip_hdr(skb)->daddr;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.u.ip6 = ipv6_hdr(skb)->daddr;
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get(mdb, &ip);
}

static void br_mdb_free(struct rcu_head *head)
{
	struct net_bridge_mdb_htable *mdb =
		container_of(head, struct net_bridge_mdb_htable, rcu);
	struct net_bridge_mdb_htable *old = mdb->old;

	mdb->old = NULL;
	kfree(old->mhash);
	kfree(old);
}

static int br_mdb_copy(struct net_bridge_mdb_htable *new,
		       struct net_bridge_mdb_htable *old,
		       int elasticity)
{
	struct net_bridge_mdb_entry *mp;
	int maxlen;
	int len;
	int i;

	for (i = 0; i < old->max; i++)
		hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
			hlist_add_head(&mp->hlist[new->ver],
				       &new->mhash[br_ip_hash(new, &mp->addr)]);

	if (!elasticity)
		return 0;

	maxlen = 0;
	for (i = 0; i < new->max; i++) {
		len = 0;
		hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
			len++;
		if (len > maxlen)
			maxlen = len;
	}

	return maxlen > elasticity ? -EINVAL : 0;
}

void br_multicast_free_pg(struct rcu_head *head)
{
	struct net_bridge_port_group *p =
		container_of(head, struct net_bridge_port_group, rcu);

	kfree(p);
}

static void br_multicast_free_group(struct rcu_head *head)
{
	struct net_bridge_mdb_entry *mp =
		container_of(head, struct net_bridge_mdb_entry, rcu);

	kfree(mp);
}

static void br_multicast_group_expired(unsigned long data)
{
	struct net_bridge_mdb_entry *mp = (void *)data;
	struct net_bridge *br = mp->br;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	mp->mglist = false;

	if (mp->ports)
		goto out;

	mdb = mlock_dereference(br->mdb, br);

	hlist_del_rcu(&mp->hlist[mdb->ver]);
	mdb->size--;

	call_rcu_bh(&mp->rcu, br_multicast_free_group);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_del_pg(struct net_bridge *br,
				struct net_bridge_port_group *pg)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;

	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &pg->addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
			      p->flags);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);

		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

		return;
	}

	WARN_ON(1);
}

static void br_multicast_port_group_expired(unsigned long data)
{
	struct net_bridge_port_group *pg = (void *)data;
	struct net_bridge *br = pg->port->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	br_multicast_del_pg(br, pg);

out:
	spin_unlock(&br->multicast_lock);
}

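/* Resize the mdb hash table to 'max' buckets: allocate a new table, copy the
 * existing entries across and publish it via RCU.  A non-zero elasticity
 * picks a fresh hash secret and fails the copy if any chain is still longer
 * than the limit afterwards.
 */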
static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
			 int elasticity)
{
	struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
	struct net_bridge_mdb_htable *mdb;
	int err;

	mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->max = max;
	mdb->old = old;

	mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
	if (!mdb->mhash) {
		kfree(mdb);
		return -ENOMEM;
	}

	mdb->size = old ? old->size : 0;
	mdb->ver = old ? old->ver ^ 1 : 0;

	if (!old || elasticity)
		get_random_bytes(&mdb->secret, sizeof(mdb->secret));
	else
		mdb->secret = old->secret;

	if (!old)
		goto out;

	err = br_mdb_copy(mdb, old, elasticity);
	if (err) {
		kfree(mdb->mhash);
		kfree(mdb);
		return err;
	}

	br_mdb_rehash_seq++;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	rcu_assign_pointer(*mdbp, mdb);

	return 0;
}

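/* Build an IGMP membership query skb addressed to 224.0.0.1: Ethernet header,
 * IPv4 header carrying the Router Alert option, then the IGMP query itself.
 * A zero 'group' yields a general query, otherwise a group-specific one.
 */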
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    __be32 group,
						    u8 *igmp_type)
{
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	struct iphdr *iph;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
						 sizeof(*ih) + 4);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_dest[0] = 1;
	eth->h_dest[1] = 0;
	eth->h_dest[2] = 0x5e;
	eth->h_dest[3] = 0;
	eth->h_dest[4] = 0;
	eth->h_dest[5] = 1;
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = 6;
	iph->tos = 0xc0;
	iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4);
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br->multicast_query_use_ifaddr ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	ih = igmp_hdr(skb);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;
	ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
	ih->code = (group ? br->multicast_last_member_interval :
			    br->multicast_query_response_interval) /
		   (HZ / IGMP_TIMER_SCALE);
	ih->group = group;
	ih->csum = 0;
	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
	skb_put(skb, sizeof(*ih));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

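/* IPv6 counterpart of the query builder: an MLD query behind a Hop-by-Hop
 * Router Alert option, sourced from one of the bridge's link-local addresses.
 * If no usable source address exists the query is dropped and has_ipv6_addr
 * is cleared.
 */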
#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    const struct in6_addr *grp,
						    u8 *igmp_type)
{
	struct sk_buff *skb;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct ethhdr *eth;
	u8 *hopopt;
	unsigned long interval;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
						 8 + sizeof(*mldq));
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + sizeof(*mldq));
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		kfree_skb(skb);
		br->has_ipv6_addr = 0;
		return NULL;
	}

	br->has_ipv6_addr = 1;
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	mldq = (struct mld_msg *) icmp6_hdr(skb);

	interval = ipv6_addr_any(grp) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;

	*igmp_type = ICMPV6_MGM_QUERY;
	mldq->mld_type = ICMPV6_MGM_QUERY;
	mldq->mld_code = 0;
	mldq->mld_cksum = 0;
	mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
	mldq->mld_reserved = 0;
	mldq->mld_mca = *grp;

	/* checksum */
	mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  sizeof(*mldq), IPPROTO_ICMPV6,
					  csum_partial(mldq,
						       sizeof(*mldq), 0));
	skb_put(skb, sizeof(*mldq));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
						struct br_ip *addr,
						u8 *igmp_type)
{
	switch (addr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_alloc_query(br, &addr->u.ip6,
						    igmp_type);
#endif
	}
	return NULL;
}

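/* Look up 'group' in the given hash chain.  If the chain has grown past the
 * configured elasticity or the table is full, the table is rehashed (or
 * snooping is disabled once the hard limit is hit) and -EAGAIN tells the
 * caller to retry against the new table.
 */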
static struct net_bridge_mdb_entry *br_multicast_get_group(
	struct net_bridge *br, struct net_bridge_port *port,
	struct br_ip *group, int hash)
{
	struct net_bridge_mdb_htable *mdb;
540
	struct net_bridge_mdb_entry *mp;
541 542
	unsigned int count = 0;
	unsigned int max;
543 544 545
	int elasticity;
	int err;

546
	mdb = rcu_dereference_protected(br->mdb, 1);
547
	hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
548
		count++;
549
		if (unlikely(br_ip_equal(group, &mp->addr)))
550 551 552 553 554 555 556 557
			return mp;
	}

	elasticity = 0;
	max = mdb->max;

	if (unlikely(count > br->hash_elasticity && count)) {
		if (net_ratelimit())
			br_info(br, "Multicast hash table "
				"chain limit reached: %s\n",
				port ? port->dev->name : br->dev->name);

		elasticity = br->hash_elasticity;
	}

	if (mdb->size >= max) {
		max *= 2;
		if (unlikely(max > br->hash_max)) {
			br_warn(br, "Multicast hash table maximum of %d "
				"reached, disabling snooping: %s\n",
				br->hash_max,
				port ? port->dev->name : br->dev->name);
			err = -E2BIG;
disable:
			br->multicast_disabled = 1;
			goto err;
		}
	}

	if (max > mdb->max || elasticity) {
		if (mdb->old) {
			if (net_ratelimit())
				br_info(br, "Multicast hash table "
					"on fire: %s\n",
					port ? port->dev->name : br->dev->name);
			err = -EEXIST;
			goto err;
		}

		err = br_mdb_rehash(&br->mdb, max, elasticity);
		if (err) {
			br_warn(br, "Cannot rehash multicast "
				"hash table, disabling snooping: %s, %d, %d\n",
				port ? port->dev->name : br->dev->name,
				mdb->size, err);
			goto disable;
		}

		err = -EAGAIN;
		goto err;
	}

	return NULL;

err:
	mp = ERR_PTR(err);
	return mp;
}

struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
	struct net_bridge_port *port, struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	int hash;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	if (!mdb) {
		err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
		if (err)
			return ERR_PTR(err);
		goto rehash;
	}

	hash = br_ip_hash(mdb, group);
	mp = br_multicast_get_group(br, port, group, hash);
	switch (PTR_ERR(mp)) {
	case 0:
		break;

	case -EAGAIN:
rehash:
		mdb = rcu_dereference_protected(br->mdb, 1);
		hash = br_ip_hash(mdb, group);
		break;

	default:
		goto out;
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	setup_timer(&mp->timer, br_multicast_group_expired,
		    (unsigned long)mp);

	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
	mdb->size++;

out:
	return mp;
}

struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->addr = *group;
	p->port = port;
	p->flags = flags;
	rcu_assign_pointer(p->next, next);
	hlist_add_head(&p->mglist, &port->mglist);
	setup_timer(&p->timer, br_multicast_port_group_expired,
		    (unsigned long)p);
	return p;
}

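/* Record a membership report for 'group': find or create the mdb entry, link
 * the reporting port into its port list (a host join sets mp->mglist instead)
 * and restart the membership timer.
 */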
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long now = jiffies;
	int err;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, port, group);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto err;

	if (!port) {
		mp->mglist = true;
		mod_timer(&mp->timer, now + br->multicast_membership_interval);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			goto found;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, 0);
	if (unlikely(!p))
		goto err;
	rcu_assign_pointer(*pp, p);
	br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);

found:
	mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}

static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      __be32 group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return 0;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}
#endif

static void br_multicast_router_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&port->multicast_router_timer) ||
	    hlist_unhashed(&port->rlist))
		goto out;

	hlist_del_init_rcu(&port->rlist);
	br_rtr_notify(br->dev, port, RTM_DELMDB);
	/* Don't allow timer refresh if the router expired */
	if (port->multicast_router == MDB_RTR_TYPE_TEMP)
		port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_local_router_expired(unsigned long data)
{
}

static void br_multicast_querier_expired(struct net_bridge *br,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || br->multicast_disabled)
		goto out;

	br_multicast_start_querier(br, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_querier_expired(br, &br->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif

static void br_multicast_select_own_querier(struct net_bridge *br,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
#endif
}

static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct br_ip *ip)
{
	struct sk_buff *skb;
	u8 igmp_type;

	skb = br_multicast_alloc_query(br, ip, &igmp_type);
	if (!skb)
		return;

	if (port) {
		skb->dev = port->dev;
		br_multicast_count(br, port, skb->protocol, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);
	} else {
		br_multicast_select_own_querier(br, ip, skb);
		br_multicast_count(br, port, skb->protocol, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}

static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct bridge_mcast_own_query *own_query)
{
	unsigned long time;
	struct br_ip br_group;
	struct bridge_mcast_other_query *other_query = NULL;

	if (!netif_running(br->dev) || br->multicast_disabled ||
	    !br->multicast_querier)
		return;

	memset(&br_group.u, 0, sizeof(br_group.u));

	if (port ? (own_query == &port->ip4_own_query) :
		   (own_query == &br->ip4_own_query)) {
		other_query = &br->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &br->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, port, &br_group);

	time = jiffies;
	time += own_query->startup_sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}

static void
br_multicast_port_query_expired(struct net_bridge_port *port,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(port->br, port, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;

	br_multicast_port_query_expired(port, &port->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;

	br_multicast_port_query_expired(port, &port->ip6_own_query);
}
#endif

int br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
		    (unsigned long)port);
	setup_timer(&port->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, (unsigned long)port);
#if IS_ENABLED(CONFIG_IPV6)
	setup_timer(&port->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, (unsigned long)port);
#endif
	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}

void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_del_pg(br, pg);
	spin_unlock_bh(&br->multicast_lock);
	del_timer_sync(&port->multicast_router_timer);
	free_percpu(port->mcast_stats);
}

static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}

void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (br->multicast_disabled || !netif_running(br->dev))
		goto out;

	br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&port->ip6_own_query);
#endif
	if (port->multicast_router == MDB_RTR_TYPE_PERM &&
	    hlist_unhashed(&port->rlist))
		br_multicast_add_router(br, port);

out:
	spin_unlock(&br->multicast_lock);
}

void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(br, pg);

	if (!hlist_unhashed(&port->rlist)) {
		hlist_del_init_rcu(&port->rlist);
		br_rtr_notify(br->dev, port, RTM_DELMDB);
		/* Don't allow timer refresh if disabling */
		if (port->multicast_router == MDB_RTR_TYPE_TEMP)
			port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	}
	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}

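/* Walk the group records of an IGMPv3 report.  Each record is treated as an
 * IGMPv2 join, except that TO_INCLUDE/IS_INCLUDE records with no sources are
 * treated as a leave.
 */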
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb,
					 u16 vid)
{
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;

		len += ntohs(grec->grec_nsrcs) * 4;
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now. */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE) &&
		    ntohs(grec->grec_nsrcs) == 0) {
			br_ip4_multicast_leave_group(br, port, group, vid);
		} else {
			err = br_ip4_multicast_add_group(br, port, group, vid);
			if (err)
				break;
		}
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb,
					u16 vid)
{
	struct icmp6hdr *icmp6h;
	struct mld2_grec *grec;
	int i;
	int len;
	int num;
	int err = 0;

	if (!pskb_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = skb_transport_offset(skb) + sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *nsrcs, _nsrcs;

		nsrcs = skb_header_pointer(skb,
					   len + offsetof(struct mld2_grec,
							  grec_nsrcs),
					   sizeof(_nsrcs), &_nsrcs);
		if (!nsrcs)
			return -EINVAL;

		if (!pskb_may_pull(skb,
				   len + sizeof(*grec) +
				   sizeof(struct in6_addr) * ntohs(*nsrcs)))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += sizeof(*grec) +
		       sizeof(struct in6_addr) * ntohs(*nsrcs);

		/* We treat these as MLDv1 reports for now. */
		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    ntohs(*nsrcs) == 0) {
			br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
						     vid);
		} else {
			err = br_ip6_multicast_add_group(br, port,
							 &grec->grec_mca, vid);
			if (err)
				break;
		}
	}

	return err;
}
#endif

static bool br_ip4_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    __be32 saddr)
{
	if (!timer_pending(&br->ip4_own_query.timer) &&
	    !timer_pending(&br->ip4_other_query.timer))
		goto update;

	if (!br->ip4_querier.addr.u.ip4)
		goto update;

	if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
		goto update;

	return false;

update:
	br->ip4_querier.addr.u.ip4 = saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip4_querier.port, port);

	return true;
}

#if IS_ENABLED(CONFIG_IPV6)
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	if (!timer_pending(&br->ip6_own_query.timer) &&
	    !timer_pending(&br->ip6_other_query.timer))
		goto update;

	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
		goto update;

	return false;

update:
	br->ip6_querier.addr.u.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
#endif

static bool br_multicast_select_querier(struct net_bridge *br,
					struct net_bridge_port *port,
					struct br_ip *saddr)
{
	switch (saddr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
#endif
	}

	return false;
}

static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}

/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	if (!hlist_unhashed(&port->rlist))
		return;

	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_behind_rcu(&port->rlist, slot);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
	br_rtr_notify(br->dev, port, RTM_NEWMDB);
}

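/* Note that a multicast router was seen behind 'port' (or locally when port
 * is NULL) and (re)arm the corresponding router timer; permanent and disabled
 * modes are left untouched.
 */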
static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY)
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		return;
	}

	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(br, port);

	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}

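/* A query from another querier was received: run querier election, hold our
 * own queries back for the querier interval and mark the ingress port as a
 * router port.
 */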
static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (!br_multicast_select_querier(br, port, saddr))
		return;

	br_multicast_update_query_timer(br, query, max_delay);
	br_multicast_mark_router(br, port);
}

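/* Handle an incoming IGMP query.  A general query (group == 0) feeds querier
 * election and router marking; a group-specific query lowers the timers of
 * the matching mdb entry and its ports towards the advertised maximum
 * response time.
 */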
static int br_ip4_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	__be32 group;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (skb->len == offset + sizeof(*ih)) {
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (skb->len >= offset + sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs)
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		saddr.proto = htons(ETH_P_IP);
		saddr.u.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	if (skb->len == offset + sizeof(*mld)) {
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.u.ip6 = ip6h->saddr;

		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif

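/* Handle a leave for 'group'.  With fast-leave the port entry is removed
 * immediately; otherwise the group and port timers are shortened to the
 * last-member window (and last-member queries are sent if we are the
 * querier) so the entry expires unless another report refreshes it.
 */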
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (p->port != port)
				continue;

			rcu_assign_pointer(*pp, p->next);
			hlist_del_init(&p->mglist);
			del_timer(&p->timer);
			call_rcu_bh(&p->rcu, br_multicast_free_pg);
			br_mdb_notify(br->dev, port, group, RTM_DELMDB,
				      p->flags);

			if (!mp->ports && !mp->mglist &&
			    netif_running(br->dev))
				mod_timer(&mp->timer, jiffies);
		}
		goto out;
	}

	if (timer_pending(&other_query->timer))
		goto out;

	if (br->multicast_querier) {
		__br_multicast_send_query(br, port, &mp->addr);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (p->port != port)
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		if (mp->mglist &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = port ? &port->ip4_own_query : &br->ip4_own_query;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
				 own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = port ? &port->ip6_own_query : &br->ip6_own_query;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
				 own_query);
}
#endif

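/* Count an IGMP/MLD parse error against the per-CPU statistics of the port,
 * or of the bridge itself when 'p' is NULL.
 */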
static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br->multicast_stats_enabled)
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

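/* Snoop an IPv4 packet: ip_mc_check_igmp() validates and trims the IGMP
 * payload, -ENOMSG means "not an IGMP message" (still marked routers-only
 * unless link-local), and recognised report/query/leave messages update the
 * mdb.
 */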
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		err = br_ip4_multicast_query(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid);
		break;
	}

	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	br_multicast_count(br, port, skb->protocol, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
		break;
	}

	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	br_multicast_count(br, port, skb->protocol, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif

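/* Entry point from the bridge input path: dispatch to the IGMP or MLD handler
 * by protocol and leave the verdict in the skb control block (igmp type and
 * the mrouters_only flag used by the forwarding code).
 */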
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (br->multicast_disabled)
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(br, port, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(br, port, skb, vid);
		break;
#endif
	}

	return ret;
}

static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif

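/* Install the multicast defaults for a newly created bridge; the timer values
 * mirror the protocol defaults (125s query interval, 260s membership
 * interval).
 */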
void br_multicast_init(struct net_bridge *br)
{
	br->hash_elasticity = 4;
	br->hash_max = 512;

	br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	br->multicast_querier = 0;
	br->multicast_query_use_ifaddr = 0;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
#if IS_ENABLED(CONFIG_IPV6)
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif
	br->has_ipv6_addr = 1;

	spin_lock_init(&br->multicast_lock);
	setup_timer(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	setup_timer(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->ip4_own_query.timer, br_ip4_multicast_query_expired,
		    (unsigned long)br);
#if IS_ENABLED(CONFIG_IPV6)
	setup_timer(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->ip6_own_query.timer, br_ip6_multicast_query_expired,
		    (unsigned long)br);
#endif
}

static void __br_multicast_open(struct net_bridge *br,
				struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (br->multicast_disabled)
		return;

	mod_timer(&query->timer, jiffies);
}

void br_multicast_open(struct net_bridge *br)
{
	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}

void br_multicast_stop(struct net_bridge *br)
{
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif
}

void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *n;
	u32 ver;
	int i;

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	if (!mdb)
		goto out;

	br->mdb = NULL;

	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	if (mdb->old) {
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);

	free_percpu(br->mcast_stats);
}

int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		del_timer(&br->multicast_router_timer);
		/* fall through */
	case MDB_RTR_TYPE_TEMP_QUERY:
		br->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}

static void __del_port_router(struct net_bridge_port *p)
{
	if (hlist_unhashed(&p->rlist))
		return;
	hlist_del_init_rcu(&p->rlist);
	br_rtr_notify(p->br->dev, p, RTM_DELMDB);
}

int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	unsigned long now = jiffies;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);
	if (p->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (p->multicast_router == MDB_RTR_TYPE_TEMP)
			mod_timer(&p->multicast_router_timer,
				  now + br->multicast_querier_interval);
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		p->multicast_router = MDB_RTR_TYPE_DISABLED;
		__del_port_router(p);
		del_timer(&p->multicast_router_timer);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		__del_port_router(p);
		break;
	case MDB_RTR_TYPE_PERM:
		p->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&p->multicast_router_timer);
		br_multicast_add_router(br, p);
		break;
	case MDB_RTR_TYPE_TEMP:
		p->multicast_router = MDB_RTR_TYPE_TEMP;
		br_multicast_mark_router(br, p);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}

static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open(br, query);

	list_for_each_entry(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		if (query == &br->ip4_own_query)
			br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->ip6_own_query);
#endif
	}
}

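/* Runtime toggle for multicast snooping: re-enabling rehashes the existing
 * mdb table and restarts the IGMP/MLD queriers so state can be rebuilt.
 */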
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
1997
	int err = 0;
1998
	struct net_bridge_mdb_htable *mdb;
1999

2000
	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_disabled == !val)
		goto unlock;

	br->multicast_disabled = !val;
	if (br->multicast_disabled)
		goto unlock;

	if (!netif_running(br->dev))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->multicast_disabled = !!val;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, mdb->max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

	br_multicast_start_querier(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_querier == val)
		goto unlock;

	br->multicast_querier = val;
	if (!val)
		goto unlock;

	max_delay = br->multicast_query_response_interval;

	if (!timer_pending(&br->ip4_other_query.timer))
		br->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&br->ip6_other_query.timer))
		br->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;
	u32 old;
	struct net_bridge_mdb_htable *mdb;

	spin_lock_bh(&br->multicast_lock);
	if (!is_power_of_2(val))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb && val < mdb->size)
		goto unlock;

	err = 0;

	old = br->hash_max;
	br->hash_max = val;

	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->hash_max = old;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, br->hash_max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev:	The bridge port adjacent to which to retrieve addresses
 * @br_ip_list:	The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by the caller
 * - br_ip_list might contain duplicates in the end
 *   (handling them is up to the caller)
 * - the entries added to br_ip_list need to be freed by the caller,
 *   as shown in the usage sketch after this function
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				goto unlock;

			entry->addr = group->addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
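
/*
 * Usage sketch (illustrative only, not part of the bridge code): a
 * hypothetical caller gathering the snooped addresses seen on the ports
 * adjacent to "port_dev" and releasing the list entries afterwards, as
 * required by the notes above:
 *
 *	LIST_HEAD(mcast_list);
 *	struct br_ip_list *entry, *tmp;
 *	int count;
 *
 *	count = br_multicast_list_adjacent(port_dev, &mcast_list);
 *	list_for_each_entry_safe(entry, tmp, &mcast_list, list) {
 *		// entry->addr holds one snooped group; duplicates possible
 *		list_del(&entry->list);
 *		kfree(entry);
 *	}
 */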

/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists anywhere on the bridged link layer.
 * Otherwise returns false.
 */
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct ethhdr eth;
	bool ret = false;

	rcu_read_lock();
	if (!br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	memset(&eth, 0, sizeof(eth));
	eth.h_proto = htons(proto);

	ret = br_multicast_querier_exists(br, &eth);

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
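
/*
 * Usage sketch (illustrative only): a hypothetical protocol running on top of
 * a bridge port could use this helper to decide whether IGMP snooping state
 * on the segment can be trusted:
 *
 *	if (!br_multicast_has_querier_anywhere(port_dev, ETH_P_IP))
 *		// no valid IGMP querier anywhere on the bridged link layer,
 *		// so snooped group information may be incomplete
 *		...;
 */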

/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	if (!br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	switch (proto) {
	case ETH_P_IP:
		if (!timer_pending(&br->ip4_other_query.timer) ||
		    rcu_dereference(br->ip4_querier.port) == port)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		if (!timer_pending(&br->ip6_other_query.timer) ||
		    rcu_dereference(br->ip6_querier.port) == port)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
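
/*
 * Usage sketch (illustrative only): a hypothetical caller asking whether an
 * MLD querier was elected behind one of the other ports of the bridge that
 * "port_dev" is attached to:
 *
 *	if (br_multicast_has_querier_adjacent(port_dev, ETH_P_IPV6))
 *		// reports sent towards the bridge reach a foreign querier
 *		...;
 */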

static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       __be16 proto, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			pstats->mstats.igmp_queries[dir]++;
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			pstats->mstats.mld_queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}

void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
			__be16 proto, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	/* if multicast_disabled is true then igmp type can't be set */
	if (!type || !br->multicast_stats_enabled)
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, proto, type, dir);
}

int br_multicast_init_stats(struct net_bridge *br)
{
	br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!br->mcast_stats)
		return -ENOMEM;

	return 0;
}

static void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}

void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

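		/*
		 * Snapshot this CPU's counters consistently: retry the copy
		 * if the u64_stats sequence count shows that a writer updated
		 * them in the meantime.
		 */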
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_queries, temp.igmp_queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_queries, temp.mld_queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}