// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

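/* Dump the bridge's multicast router ports as an MDBA_ROUTER nest.
 * Runs in the RTM_GETMDB dump path under rcu_read_lock().
 */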
static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			       struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *nest, *port_nest;

	if (!br->multicast_router || hlist_empty(&br->router_list))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
		if (!p)
			continue;
		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;
		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				br_timer_value(&p->multicast_router_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_router)) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

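/* Translate internal MDB_PG_FLAGS_* into the UAPI state and
 * MDB_FLAGS_* bits exposed in struct br_mdb_entry.
 */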
static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
	if (flags & MDB_PG_FLAGS_FAST_LEAVE)
		e->flags |= MDB_FLAGS_FAST_LEAVE;
}

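/* Convert a UAPI br_mdb_entry group address into the internal
 * br_ip representation.
 */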
static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	if (ip->proto == htons(ETH_P_IP))
		ip->u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else
		ip->u.ip6 = entry->addr.u.ip6;
#endif
}

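/* Emit one MDBA_MDB_ENTRY_INFO nest for a group. A NULL port group
 * means the entry describes the host join on the bridge device itself.
 */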
static int __mdb_fill_info(struct sk_buff *skb,
			   struct net_bridge_mdb_entry *mp,
			   struct net_bridge_port_group *p)
{
	struct timer_list *mtimer;
	struct nlattr *nest_ent;
	struct br_mdb_entry e;
	u8 flags = 0;
	int ifindex;

	memset(&e, 0, sizeof(e));
	if (p) {
		ifindex = p->port->dev->ifindex;
		mtimer = &p->timer;
		flags = p->flags;
	} else {
		ifindex = mp->br->dev->ifindex;
		mtimer = &mp->timer;
	}

	__mdb_entry_fill_flags(&e, flags);
	e.ifindex = ifindex;
	e.vid = mp->addr.vid;
	if (mp->addr.proto == htons(ETH_P_IP))
		e.addr.u.ip4 = mp->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	if (mp->addr.proto == htons(ETH_P_IPV6))
		e.addr.u.ip6 = mp->addr.u.ip6;
#endif
	e.addr.proto = mp->addr.proto;
	nest_ent = nla_nest_start_noflag(skb,
					 MDBA_MDB_ENTRY_INFO);
	if (!nest_ent)
		return -EMSGSIZE;

	if (nla_put_nohdr(skb, sizeof(e), &e) ||
	    nla_put_u32(skb,
			MDBA_MDB_EATTR_TIMER,
			br_timer_value(mtimer))) {
		nla_nest_cancel(skb, nest_ent);
		return -EMSGSIZE;
	}
	nla_nest_end(skb, nest_ent);

	return 0;
}

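/* Walk the bridge's MDB and dump each group into an MDBA_MDB nest,
 * host-joined entry first, then the per-port entries. Resumes from
 * cb->args[1] across multi-part dumps.
 */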
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		if (mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
			if (err) {
				nla_nest_cancel(skb, nest2);
				break;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		      pp = &p->next) {
			if (!p->port)
				continue;

			err = __mdb_fill_info(skb, mp, p);
			if (err) {
				nla_nest_cancel(skb, nest2);
				goto out;
			}
		}
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	nla_nest_end(skb, nest);
	return err;
}

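/* Strict validation of RTM_GETMDB dump requests: the header must be a
 * bare br_port_msg with no ifindex filter and no trailing attributes.
 */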
static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	struct br_port_msg *bpm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
		return -EINVAL;
	}

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
		return -EINVAL;
	}
	if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
		return -EINVAL;
	}

	return 0;
}

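/* RTM_GETMDB dump handler: emit one multi-part message per bridge
 * device in the namespace, carrying its MDB entries and router ports.
 */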
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	if (cb->strict_check) {
		int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);

		if (err < 0)
			return err;
	}

	s_idx = cb->args[0];

	rcu_read_lock();

	cb->seq = net->dev_base_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
		skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

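/* Build a single RTM_NEWMDB/RTM_DELMDB notification carrying one
 * br_mdb_entry inside MDBA_MDB/MDBA_MDB_ENTRY nests.
 */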
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct br_mdb_entry *entry, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_mdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(struct br_mdb_entry));
}

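/* Completion context for deferred switchdev offload: when the hardware
 * operation finishes, br_mdb_complete() looks the group up again and
 * marks the matching port group as offloaded.
 */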
struct br_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}

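/* Propagate a host-joined group (a join on the bridge device itself)
 * to one lower device as a deferred SWITCHDEV_OBJ_ID_HOST_MDB add or
 * delete.
 */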
static void br_mdb_switchdev_host_port(struct net_device *dev,
				       struct net_device *lower_dev,
				       struct br_mdb_entry *entry, int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = entry->vid,
	};

	if (entry->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = dev;
	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

static void br_mdb_switchdev_host(struct net_device *dev,
				  struct br_mdb_entry *entry, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_mdb_switchdev_host_port(dev, lower_dev, entry, type);
}

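/* Offload the change to switchdev (port entries directly, host entries
 * via every lower device) and notify RTNLGRP_MDB listeners.
 */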
static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
			    struct br_mdb_entry *entry, int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = entry->vid,
	};
	struct net_device *port_dev;
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	port_dev = __dev_get_by_index(net, entry->ifindex);
	if (entry->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = port_dev;
	if (p && port_dev && type == RTM_NEWMDB) {
		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
		if (complete_info) {
			complete_info->port = p;
			__mdb_entry_to_br_ip(entry, &complete_info->ip);
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			if (switchdev_port_obj_add(port_dev, &mdb.obj, NULL))
				kfree(complete_info);
		}
	} else if (p && port_dev && type == RTM_DELMDB) {
		switchdev_port_obj_del(port_dev, &mdb.obj);
	}

	if (!p)
		br_mdb_switchdev_host(dev, entry, type);

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

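/* Entry point for the bridge multicast code to report MDB changes:
 * converts the internal br_ip group to a UAPI entry and notifies.
 */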
void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
		   struct br_ip *group, int type, u8 flags)
{
	struct br_mdb_entry entry;

	memset(&entry, 0, sizeof(entry));
	if (port)
		entry.ifindex = port->dev->ifindex;
	else
		entry.ifindex = dev->ifindex;
	entry.addr.proto = group->proto;
	entry.addr.u.ip4 = group->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	entry.addr.u.ip6 = group->u.ip6;
#endif
	entry.vid = group->vid;
	__mdb_entry_fill_flags(&entry, flags);
	__br_mdb_notify(dev, port, &entry, type);
}

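/* Build a router-port notification: a br_port_msg for the bridge with
 * an MDBA_ROUTER nest naming the affected port.
 */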
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32));
}

void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;

	ifindex = port ? port->dev->ifindex : 0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

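/* Sanity-check a userspace entry: require a port, a routable multicast
 * group address, a known state and a valid VLAN id.
 */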
static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
	if (entry->ifindex == 0)
		return false;

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4))
			return false;
		if (ipv4_is_local_multicast(entry->addr.u.ip4))
			return false;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
			return false;
#endif
	} else
		return false;
	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
		return false;
	if (entry->vid >= VLAN_VID_MASK)
		return false;

	return true;
}

static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, NULL, NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY] ||
	    nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
		return -EINVAL;
	}

	*pentry = entry;
	return 0;
}

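/* Add a group entry under multicast_lock: either a host join on the
 * bridge device (port == NULL) or a new port group inserted at its
 * pointer-sorted position. Temporary entries get the membership timer
 * armed.
 */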
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_ip *group, unsigned char state)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long now = jiffies;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (!mp) {
		mp = br_multicast_new_group(br, group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	/* host join */
	if (!port) {
		/* don't allow any flags for host-joined groups */
		if (state)
			return -EINVAL;
		if (mp->host_joined)
			return -EEXIST;

		br_multicast_host_join(mp, false);

		return 0;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			return -EEXIST;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, state, NULL);
	if (unlikely(!p))
		return -ENOMEM;
	rcu_assign_pointer(*pp, p);
	if (state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);

	return 0;
}

static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct br_mdb_entry *entry)
{
	struct br_ip ip;
	struct net_device *dev;
	struct net_bridge_port *p = NULL;
	int ret;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	if (entry->ifindex != br->dev->ifindex) {
		dev = __dev_get_by_index(net, entry->ifindex);
		if (!dev)
			return -ENODEV;

		p = br_port_get_rtnl(dev);
		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
			return -EINVAL;
	}

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, &ip, entry->state);
	spin_unlock_bh(&br->multicast_lock);
	return ret;
}

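/* RTM_NEWMDB handler (exercised e.g. by iproute2's "bridge mdb add
 * dev br0 port eth1 grp 239.1.1.1 permanent"; device names here are
 * illustrative). If VLAN filtering is enabled and no VLAN was given,
 * the entry is installed on every VLAN configured on the port.
 */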
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev)
			return -ENODEV;

		p = br_port_get_rtnl(pdev);
		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
			return -EINVAL;
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, entry);
			if (err)
				break;
			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
		}
	} else {
		err = __br_mdb_add(net, br, entry);
		if (!err)
			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
	}

	return err;
}

static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->port || p->port->dev->ifindex != entry->ifindex)
			continue;

		if (p->port->state == BR_STATE_DISABLED)
			goto unlock;

		__mdb_entry_fill_flags(entry, p->flags);
		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		kfree_rcu(p, rcu);
		err = 0;

		if (!mp->ports && !mp->host_joined &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

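/* RTM_DELMDB handler, the mirror of br_mdb_add(): deletes the entry on
 * one VLAN or on every configured VLAN when none was specified.
 */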
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev)
			return -ENODEV;

		p = br_port_get_rtnl(pdev);
		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
			return -EINVAL;
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry);
			if (!err)
				__br_mdb_notify(dev, p, entry, RTM_DELMDB);
		}
	} else {
		err = __br_mdb_del(br, entry);
		if (!err)
			__br_mdb_notify(dev, p, entry, RTM_DELMDB);
	}

	return err;
}

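/* Register the RTM_GETMDB/RTM_NEWMDB/RTM_DELMDB rtnetlink handlers. */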
void br_mdb_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}

void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}