/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *		(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	  Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *      Carlos Picoto           :       PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 *
 */

#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <net/nexthop.h>
#include <net/switchdev.h>

struct ipmr_rule {
	struct fib_rule		common;
};

struct ipmr_result {
	struct mr_table		*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
 * entries is changed only in process context and protected
 * with weak lock mrt_lock. Queue of unresolved entries is protected
 * with strong spinlock mfc_unres_lock.
 *
 * In this case data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __ro_after_init;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct net_device *dev, struct sk_buff *skb,
			  struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd);
static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
static void mroute_clean_tables(struct mr_table *mrt, bool all);
static void ipmr_expire_process(struct timer_list *t);

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	int err;
	struct ipmr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	/* update flow if oif or iif point to device enslaved to l3mdev */
	l3mdev_update_flow(net, flowi4_to_flowi(flp4));

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	arg->table = fib_rule_get_table(rule, arg);

	mrt = ipmr_get_table(rule->fr_net, arg->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
};

static int __net_init ipmr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt)) {
		err = PTR_ERR(mrt);
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		goto err2;

	net->ipv4.mr_rules_ops = ops;
	return 0;

err2:
	ipmr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);
		ipmr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv4.mr_rules_ops);
	rtnl_unlock();
}

static int ipmr_rules_dump(struct net *net, struct notifier_block *nb)
{
	return fib_rules_dump(net, nb, RTNL_FAMILY_IPMR);
}

static unsigned int ipmr_rules_seq_read(struct net *net)
{
	return fib_rules_seq_read(net, RTNL_FAMILY_IPMR);
}

bool ipmr_rule_default(const struct fib_rule *rule)
{
	return fib_rule_matchall(rule) && rule->table == RT_TABLE_DEFAULT;
}
EXPORT_SYMBOL(ipmr_rule_default);
#else
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = net->ipv4.mrt;
	return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt))
		return PTR_ERR(mrt);
	net->ipv4.mrt = mrt;
	return 0;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	rtnl_lock();
	ipmr_free_table(net->ipv4.mrt);
	net->ipv4.mrt = NULL;
	rtnl_unlock();
}

static int ipmr_rules_dump(struct net *net, struct notifier_block *nb)
{
	return 0;
}

static unsigned int ipmr_rules_seq_read(struct net *net)
{
	return 0;
}

bool ipmr_rule_default(const struct fib_rule *rule)
{
	return true;
}
EXPORT_SYMBOL(ipmr_rule_default);
#endif

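/* rhashtable compare function: returns 0 when the (origin, group) key in
 * @arg matches the cache entry @ptr, non-zero otherwise.
 */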
static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg,
				const void *ptr)
{
	const struct mfc_cache_cmp_arg *cmparg = arg->key;
	struct mfc_cache *c = (struct mfc_cache *)ptr;

	return cmparg->mfc_mcastgrp != c->mfc_mcastgrp ||
	       cmparg->mfc_origin != c->mfc_origin;
}

static const struct rhashtable_params ipmr_rht_params = {
	.head_offset = offsetof(struct mfc_cache, mnode),
	.key_offset = offsetof(struct mfc_cache, cmparg),
	.key_len = sizeof(struct mfc_cache_cmp_arg),
	.nelem_hint = 3,
	.locks_mul = 1,
	.obj_cmpfn = ipmr_hash_cmp,
	.automatic_shrinking = true,
};

static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	/* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
	if (id != RT_TABLE_DEFAULT && id >= 1000000000)
		return ERR_PTR(-EINVAL);

	mrt = ipmr_get_table(net, id);
	if (mrt)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return ERR_PTR(-ENOMEM);
	write_pnet(&mrt->net, net);
	mrt->id = id;

	rhltable_init(&mrt->mfc_hash, &ipmr_rht_params);
	INIT_LIST_HEAD(&mrt->mfc_cache_list);
	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	timer_setup(&mrt->ipmr_expire_timer, ipmr_expire_process, 0);

	mrt->mroute_reg_vif_num = -1;
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
	return mrt;
}

static void ipmr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	rhltable_destroy(&mrt->mfc_hash);
	kfree(mrt);
}

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	struct net *net = dev_net(dev);

	dev_close(dev);

	dev = __dev_get_by_name(net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}

/* Initialize ipmr pimreg/tunnel in_device */
static bool ipmr_init_vif_indev(const struct net_device *dev)
{
	struct in_device *in_dev;

	ASSERT_RTNL();

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev)
		return false;
	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

	return true;
}

static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
	struct net_device  *dev;

	dev = __dev_get_by_name(net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else {
			err = -EOPNOTSUPP;
		}
		dev = NULL;

		if (err == 0 &&
		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;
			if (!ipmr_init_vif_indev(dev))
				goto failure;
			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
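/* Transmit on the pimreg device: the packet is not sent on the wire, it is
 * reported to the mroute daemon as an IGMPMSG_WHOLEPKT upcall and then freed.
 */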
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif	= dev->ifindex,
		.flowi4_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi4_mark	= skb->mark,
	};
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
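	/* Leave room for an outer IPv4 header plus the 8-byte PIM register header */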
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);

	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (!ipmr_init_vif_indev(dev))
		goto failure;
	if (dev_open(dev))
		goto failure;

	dev_hold(dev);

	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}

/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/* Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
		return 1;

	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	if (!reg_dev)
		return 1;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	return NET_RX_SUCCESS;
}
#else
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	return NULL;
}
#endif

static int call_ipmr_vif_entry_notifier(struct notifier_block *nb,
					struct net *net,
					enum fib_event_type event_type,
					struct vif_device *vif,
					vifi_t vif_index, u32 tb_id)
{
	struct vif_entry_notifier_info info = {
		.info = {
			.family = RTNL_FAMILY_IPMR,
			.net = net,
		},
		.dev = vif->dev,
		.vif_index = vif_index,
		.vif_flags = vif->flags,
		.tb_id = tb_id,
	};

	return call_fib_notifier(nb, net, event_type, &info.info);
}

static int call_ipmr_vif_entry_notifiers(struct net *net,
					 enum fib_event_type event_type,
					 struct vif_device *vif,
					 vifi_t vif_index, u32 tb_id)
{
	struct vif_entry_notifier_info info = {
		.info = {
			.family = RTNL_FAMILY_IPMR,
			.net = net,
		},
		.dev = vif->dev,
		.vif_index = vif_index,
		.vif_flags = vif->flags,
		.tb_id = tb_id,
	};

	ASSERT_RTNL();
	net->ipv4.ipmr_seq++;
	return call_fib_notifiers(net, event_type, &info.info);
}

static int call_ipmr_mfc_entry_notifier(struct notifier_block *nb,
					struct net *net,
					enum fib_event_type event_type,
					struct mfc_cache *mfc, u32 tb_id)
{
	struct mfc_entry_notifier_info info = {
		.info = {
			.family = RTNL_FAMILY_IPMR,
			.net = net,
		},
		.mfc = mfc,
		.tb_id = tb_id
	};

	return call_fib_notifier(nb, net, event_type, &info.info);
}

static int call_ipmr_mfc_entry_notifiers(struct net *net,
					 enum fib_event_type event_type,
					 struct mfc_cache *mfc, u32 tb_id)
{
	struct mfc_entry_notifier_info info = {
		.info = {
			.family = RTNL_FAMILY_IPMR,
			.net = net,
		},
		.mfc = mfc,
		.tb_id = tb_id
	};

	ASSERT_RTNL();
	net->ipv4.ipmr_seq++;
	return call_fib_notifiers(net, event_type, &info.info);
}

/**
 *	vif_delete - Delete a VIF entry
 *	@notify: Set to 1, if the caller is a notifier_call
 */
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
{
	struct net *net = read_pnet(&mrt->net);
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	if (VIF_EXISTS(mrt, vifi))
		call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_DEL, v, vifi,
					      mrt->id);

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;

	if (vifi + 1 == mrt->maxvif) {
		int tmp;

		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp+1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		inet_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					    NETCONFA_MC_FORWARDING,
					    dev->ifindex, &in_dev->cnf);
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

static void ipmr_cache_free_rcu(struct rcu_head *head)
{
	struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

	kmem_cache_free(mrt_cachep, c);
}

void ipmr_cache_free(struct mfc_cache *c)
{
	call_rcu(&c->rcu, ipmr_cache_free_rcu);
}
EXPORT_SYMBOL(ipmr_cache_free);

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = nlmsg_data(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			kfree_skb(skb);
		}
	}

	ipmr_cache_free(c);
}

/* Timer process for the unresolved queue. */
static void ipmr_expire_process(struct timer_list *t)
{
	struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, *next;

	if (!spin_trylock(&mfc_unres_lock)) {
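		/* Another context holds the lock: retry in a tenth of a second */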
		mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
		return;
	}

	if (list_empty(&mrt->mfc_unres_queue))
		goto out;

	now = jiffies;
	expires = 10*HZ;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mroute_netlink_event(mrt, c, RTM_DELROUTE);
		ipmr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */
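/* minvif/maxvif record the smallest and largest VIF index whose TTL
 * threshold is usable (non-zero and below 255).
 */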
static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
				   unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}

static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
	};
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
	case VIFF_REGISTER:
		if (!ipmr_pimsm_enabled())
			return -EINVAL;
		/* Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(net, vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_USE_IFINDEX:
	case 0:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && !__in_dev_get_rtnl(dev)) {
				dev_put(dev);
				return -EADDRNOTAVAIL;
			}
		} else {
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
		}
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev) {
		dev_put(dev);
		return -EADDRNOTAVAIL;
	}
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_MC_FORWARDING,
				    dev->ifindex, &in_dev->cnf);
	ip_rt_multicast_event(in_dev);

	/* Fill in the VIF structures */
	vif_device_init(v, dev, vifc->vifc_rate_limit,
			vifc->vifc_threshold,
			vifc->vifc_flags | (!mrtsock ? VIFF_STATIC : 0),
			(VIFF_TUNNEL | VIFF_REGISTER));

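	/* Record the device's switchdev parent ID (if any) so forwarding can
	 * later recognise packets already replicated in hardware.
	 */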
	attr.orig_dev = dev;
	if (!switchdev_port_attr_get(dev, &attr)) {
		memcpy(v->dev_parent_id.id, attr.u.ppid.id, attr.u.ppid.id_len);
		v->dev_parent_id.id_len = attr.u.ppid.id_len;
	} else {
		v->dev_parent_id.id_len = 0;
	}

	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
	if (vifi+1 > mrt->maxvif)
		mrt->maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
	call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD, v, vifi, mrt->id);
	return 0;
}

974
/* called with rcu_read_lock() */
975
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
976 977
					 __be32 origin,
					 __be32 mcastgrp)
L
Linus Torvalds 已提交
978
{
979 980 981 982 983
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = mcastgrp,
			.mfc_origin = origin
	};
	struct rhlist_head *tmp, *list;
L
Linus Torvalds 已提交
984 985
	struct mfc_cache *c;

986 987 988 989
	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		return c;

990
	return NULL;
L
Linus Torvalds 已提交
991 992
}

993 994 995 996
/* Look for a (*,*,oif) entry */
static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
						    int vifi)
{
997 998 999 1000 1001
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = htonl(INADDR_ANY),
			.mfc_origin = htonl(INADDR_ANY)
	};
	struct rhlist_head *tmp, *list;
1002 1003
	struct mfc_cache *c;

1004 1005 1006
	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (c->mfc_un.res.ttls[vifi] < 255)
1007 1008 1009 1010 1011 1012 1013 1014 1015
			return c;

	return NULL;
}

/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
					     __be32 mcastgrp, int vifi)
{
1016 1017 1018 1019 1020
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = mcastgrp,
			.mfc_origin = htonl(INADDR_ANY)
	};
	struct rhlist_head *tmp, *list;
1021 1022
	struct mfc_cache *c, *proxy;

1023
	if (mcastgrp == htonl(INADDR_ANY))
1024 1025
		goto skip;

1026 1027 1028 1029 1030 1031 1032 1033 1034 1035
	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode) {
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

		/* It's ok if the vifi is part of the static tree */
		proxy = ipmr_cache_find_any_parent(mrt, c->mfc_parent);
		if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
			return c;
	}
1036 1037 1038 1039 1040

skip:
	return ipmr_cache_find_any_parent(mrt, vifi);
}

1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060
/* Look for a (S,G,iif) entry if parent != -1 */
static struct mfc_cache *ipmr_cache_find_parent(struct mr_table *mrt,
						__be32 origin, __be32 mcastgrp,
						int parent)
{
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = mcastgrp,
			.mfc_origin = origin,
	};
	struct rhlist_head *tmp, *list;
	struct mfc_cache *c;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (parent == -1 || parent == c->mfc_parent)
			return c;

	return NULL;
}

1061
/* Allocate a multicast cache entry */
1062
static struct mfc_cache *ipmr_cache_alloc(void)
L
Linus Torvalds 已提交
1063
{
J
Jianjun Kong 已提交
1064
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
1065

1066 1067
	if (c) {
		c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
1068
		c->mfc_un.res.minvif = MAXVIFS;
1069
		refcount_set(&c->mfc_un.res.refcount, 1);
1070
	}
L
Linus Torvalds 已提交
1071 1072 1073
	return c;
}

1074
static struct mfc_cache *ipmr_cache_alloc_unres(void)
L
Linus Torvalds 已提交
1075
{
J
Jianjun Kong 已提交
1076
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
1077 1078 1079 1080 1081

	if (c) {
		skb_queue_head_init(&c->mfc_un.unres.unresolved);
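		/* An unresolved entry is kept for at most 10 seconds while the
		 * daemon is asked to resolve it.
		 */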
		c->mfc_un.unres.expires = jiffies + 10*HZ;
	}
L
Linus Torvalds 已提交
1082 1083 1084
	return c;
}

1085
/* A cache entry has gone into a resolved state from queued */
1086 1087
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
L
Linus Torvalds 已提交
1088 1089
{
	struct sk_buff *skb;
1090
	struct nlmsgerr *e;
L
Linus Torvalds 已提交
1091

E
Eric Dumazet 已提交
1092
	/* Play the pending entries through our router */
J
Jianjun Kong 已提交
1093
	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
1094
		if (ip_hdr(skb)->version == 0) {
1095 1096
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct iphdr));
L
Linus Torvalds 已提交
1097

1098
			if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
E
Eric Dumazet 已提交
1099 1100
				nlh->nlmsg_len = skb_tail_pointer(skb) -
						 (u8 *)nlh;
L
Linus Torvalds 已提交
1101 1102
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
1103
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
L
Linus Torvalds 已提交
1104
				skb_trim(skb, nlh->nlmsg_len);
1105
				e = nlmsg_data(nlh);
1106 1107
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
L
Linus Torvalds 已提交
1108
			}
1109

1110
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
E
Eric Dumazet 已提交
1111
		} else {
1112
			ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
E
Eric Dumazet 已提交
1113
		}
L
Linus Torvalds 已提交
1114 1115 1116
	}
}

1117
/* Bounce a cache query up to mrouted and netlink.
L
Linus Torvalds 已提交
1118
 *
1119
 * Called under mrt_lock.
L
Linus Torvalds 已提交
1120
 */
1121
static int ipmr_cache_report(struct mr_table *mrt,
1122
			     struct sk_buff *pkt, vifi_t vifi, int assert)
L
Linus Torvalds 已提交
1123
{
1124
	const int ihl = ip_hdrlen(pkt);
1125
	struct sock *mroute_sk;
L
Linus Torvalds 已提交
1126 1127
	struct igmphdr *igmp;
	struct igmpmsg *msg;
1128
	struct sk_buff *skb;
L
Linus Torvalds 已提交
1129 1130 1131 1132 1133 1134 1135
	int ret;

	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
		skb = alloc_skb(128, GFP_ATOMIC);

S
Stephen Hemminger 已提交
1136
	if (!skb)
L
Linus Torvalds 已提交
1137 1138 1139 1140
		return -ENOBUFS;

	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
E
Eric Dumazet 已提交
1141 1142 1143
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
L
Linus Torvalds 已提交
1144
		 */
1145 1146
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
1147
		skb_reset_transport_header(skb);
1148
		msg = (struct igmpmsg *)skb_network_header(skb);
1149
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
L
Linus Torvalds 已提交
1150 1151
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
1152
		msg->im_vif = mrt->mroute_reg_vif_num;
1153 1154 1155
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166
	} else {
		/* Copy the IP header */
		skb_set_network_header(skb, skb->len);
		skb_put(skb, ihl);
		skb_copy_to_linear_data(skb, pkt->data, ihl);
		/* Flag to the kernel this is a route add */
		ip_hdr(skb)->protocol = 0;
		msg = (struct igmpmsg *)skb_network_header(skb);
		msg->im_vif = vifi;
		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		/* Add our header */
1167
		igmp = skb_put(skb, sizeof(struct igmphdr));
1168 1169 1170 1171 1172
		igmp->type = assert;
		msg->im_msgtype = assert;
		igmp->code = 0;
		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
		skb->transport_header = skb->network_header;
1173
	}
L
Linus Torvalds 已提交
1174

E
Eric Dumazet 已提交
1175 1176
	rcu_read_lock();
	mroute_sk = rcu_dereference(mrt->mroute_sk);
1177
	if (!mroute_sk) {
E
Eric Dumazet 已提交
1178
		rcu_read_unlock();
L
Linus Torvalds 已提交
1179 1180 1181 1182
		kfree_skb(skb);
		return -EINVAL;
	}

1183 1184
	igmpmsg_netlink_event(mrt, skb);

E
Eric Dumazet 已提交
1185
	/* Deliver to mrouted */
E
Eric Dumazet 已提交
1186 1187
	ret = sock_queue_rcv_skb(mroute_sk, skb);
	rcu_read_unlock();
1188
	if (ret < 0) {
1189
		net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
L
Linus Torvalds 已提交
1190 1191 1192 1193 1194 1195
		kfree_skb(skb);
	}

	return ret;
}

1196 1197
/* Queue a packet for resolution. It gets locked cache entry! */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
1198
				 struct sk_buff *skb, struct net_device *dev)
L
Linus Torvalds 已提交
1199
{
1200 1201
	const struct iphdr *iph = ip_hdr(skb);
	struct mfc_cache *c;
1202
	bool found = false;
L
Linus Torvalds 已提交
1203 1204 1205
	int err;

	spin_lock_bh(&mfc_unres_lock);
1206
	list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
1207
		if (c->mfc_mcastgrp == iph->daddr &&
1208 1209
		    c->mfc_origin == iph->saddr) {
			found = true;
L
Linus Torvalds 已提交
1210
			break;
1211
		}
L
Linus Torvalds 已提交
1212 1213
	}

1214
	if (!found) {
E
Eric Dumazet 已提交
1215
		/* Create a new entry if allowable */
1216
		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
1217
		    (c = ipmr_cache_alloc_unres()) == NULL) {
L
Linus Torvalds 已提交
1218 1219 1220 1221 1222 1223
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

E
Eric Dumazet 已提交
1224
		/* Fill in the new cache entry */
1225 1226 1227
		c->mfc_parent	= -1;
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;
L
Linus Torvalds 已提交
1228

E
Eric Dumazet 已提交
1229
		/* Reflect first query at mrouted. */
1230
		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
1231
		if (err < 0) {
1232
			/* If the report failed throw the cache entry
L
Linus Torvalds 已提交
1233 1234 1235 1236
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

1237
			ipmr_cache_free(c);
L
Linus Torvalds 已提交
1238 1239 1240 1241
			kfree_skb(skb);
			return err;
		}

1242 1243
		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc_unres_queue);
1244
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);
L
Linus Torvalds 已提交
1245

1246 1247
		if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
			mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
L
Linus Torvalds 已提交
1248 1249
	}

E
Eric Dumazet 已提交
1250 1251
	/* See if we can append the packet */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
L
Linus Torvalds 已提交
1252 1253 1254
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
1255 1256 1257 1258
		if (dev) {
			skb->dev = dev;
			skb->skb_iif = dev->ifindex;
		}
J
Jianjun Kong 已提交
1259
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
L
Linus Torvalds 已提交
1260 1261 1262 1263 1264 1265 1266
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

1267
/* MFC cache manipulation by user space mroute daemon */
L
Linus Torvalds 已提交
1268

1269
static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
L
Linus Torvalds 已提交
1270
{
1271
	struct net *net = read_pnet(&mrt->net);
1272
	struct mfc_cache *c;
L
Linus Torvalds 已提交
1273

1274 1275 1276 1277 1278 1279 1280 1281 1282
	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
				   mfc->mfcc_mcastgrp.s_addr, parent);
	rcu_read_unlock();
	if (!c)
		return -ENOENT;
	rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
	list_del_rcu(&c->list);
1283
	call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, c, mrt->id);
1284
	mroute_netlink_event(mrt, c, RTM_DELROUTE);
1285
	ipmr_cache_put(c);
L
Linus Torvalds 已提交
1286

1287
	return 0;
L
Linus Torvalds 已提交
1288 1289
}

1290
static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
1291
			struct mfcctl *mfc, int mrtsock, int parent)
L
Linus Torvalds 已提交
1292
{
1293
	struct mfc_cache *uc, *c;
1294 1295
	bool found;
	int ret;
L
Linus Torvalds 已提交
1296

1297 1298 1299
	if (mfc->mfcc_parent >= MAXVIFS)
		return -ENFILE;

1300 1301 1302 1303 1304 1305
	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
				   mfc->mfcc_mcastgrp.s_addr, parent);
	rcu_read_unlock();
	if (c) {
L
Linus Torvalds 已提交
1306 1307
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
1308
		ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
L
Linus Torvalds 已提交
1309 1310 1311
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
1312 1313
		call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, c,
					      mrt->id);
1314
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);
L
Linus Torvalds 已提交
1315 1316 1317
		return 0;
	}

1318
	if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
1319
	    !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
L
Linus Torvalds 已提交
1320 1321
		return -EINVAL;

1322
	c = ipmr_cache_alloc();
1323
	if (!c)
L
Linus Torvalds 已提交
1324 1325
		return -ENOMEM;

J
Jianjun Kong 已提交
1326 1327 1328
	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
1329
	ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
L
Linus Torvalds 已提交
1330 1331 1332
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

1333 1334 1335 1336 1337 1338 1339 1340
	ret = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->mnode,
				  ipmr_rht_params);
	if (ret) {
		pr_err("ipmr: rhtable insert error %d\n", ret);
		ipmr_cache_free(c);
		return ret;
	}
	list_add_tail_rcu(&c->list, &mrt->mfc_cache_list);
1341 1342
	/* Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
L
Linus Torvalds 已提交
1343
	 */
1344
	found = false;
L
Linus Torvalds 已提交
1345
	spin_lock_bh(&mfc_unres_lock);
1346
	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
1347
		if (uc->mfc_origin == c->mfc_origin &&
L
Linus Torvalds 已提交
1348
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
1349
			list_del(&uc->list);
1350
			atomic_dec(&mrt->cache_resolve_queue_len);
1351
			found = true;
L
Linus Torvalds 已提交
1352 1353 1354
			break;
		}
	}
1355 1356
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
L
Linus Torvalds 已提交
1357 1358
	spin_unlock_bh(&mfc_unres_lock);

1359
	if (found) {
1360
		ipmr_cache_resolve(net, mrt, uc, c);
1361
		ipmr_cache_free(uc);
L
Linus Torvalds 已提交
1362
	}
1363
	call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, c, mrt->id);
1364
	mroute_netlink_event(mrt, c, RTM_NEWROUTE);
L
Linus Torvalds 已提交
1365 1366 1367
	return 0;
}

1368
/* Close the multicast socket, and clear the vif tables etc */
1369
static void mroute_clean_tables(struct mr_table *mrt, bool all)
L
Linus Torvalds 已提交
1370
{
1371
	struct net *net = read_pnet(&mrt->net);
1372
	struct mfc_cache *c, *tmp;
1373
	LIST_HEAD(list);
1374
	int i;
1375

E
Eric Dumazet 已提交
1376
	/* Shut down all active vif entries */
1377
	for (i = 0; i < mrt->maxvif; i++) {
1378 1379 1380
		if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
			continue;
		vif_delete(mrt, i, 0, &list);
L
Linus Torvalds 已提交
1381
	}
1382
	unregister_netdevice_many(&list);
L
Linus Torvalds 已提交
1383

E
Eric Dumazet 已提交
1384
	/* Wipe the cache */
1385 1386 1387 1388 1389
	list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
		if (!all && (c->mfc_flags & MFC_STATIC))
			continue;
		rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
		list_del_rcu(&c->list);
1390 1391
		call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, c,
					      mrt->id);
1392
		mroute_netlink_event(mrt, c, RTM_DELROUTE);
1393
		ipmr_cache_put(c);
L
Linus Torvalds 已提交
1394 1395
	}

1396
	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
L
Linus Torvalds 已提交
1397
		spin_lock_bh(&mfc_unres_lock);
1398
		list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
1399
			list_del(&c->list);
1400
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
1401
			ipmr_destroy_unres(mrt, c);
L
Linus Torvalds 已提交
1402 1403 1404 1405 1406
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

E
Eric Dumazet 已提交
1407 1408 1409
/* called from ip_ra_control(), before an RCU grace period,
 * we dont need to call synchronize_rcu() here
 */
L
Linus Torvalds 已提交
1410 1411
static void mrtsock_destruct(struct sock *sk)
{
1412
	struct net *net = sock_net(sk);
1413
	struct mr_table *mrt;
1414

1415
	ASSERT_RTNL();
1416
	ipmr_for_each_table(mrt, net) {
E
Eric Dumazet 已提交
1417
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
1418
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
1419 1420
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_MC_FORWARDING,
1421 1422
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
1423
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
1424
			mroute_clean_tables(mrt, false);
1425
		}
L
Linus Torvalds 已提交
1426 1427 1428
	}
}

1429 1430 1431 1432
/* Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
L
Linus Torvalds 已提交
1433
 */
1434

1435 1436
int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
			 unsigned int optlen)
L
Linus Torvalds 已提交
1437
{
1438
	struct net *net = sock_net(sk);
1439
	int val, ret = 0, parent = 0;
1440
	struct mr_table *mrt;
1441 1442 1443
	struct vifctl vif;
	struct mfcctl mfc;
	u32 uval;
1444

1445 1446
	/* There's one exception to the lock - MRT_DONE which needs to unlock */
	rtnl_lock();
1447
	if (sk->sk_type != SOCK_RAW ||
1448 1449 1450 1451
	    inet_sk(sk)->inet_num != IPPROTO_IGMP) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}
1452

1453
	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1454 1455 1456 1457
	if (!mrt) {
		ret = -ENOENT;
		goto out_unlock;
	}
S
Stephen Hemminger 已提交
1458
	if (optname != MRT_INIT) {
1459
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
1460 1461 1462 1463
		    !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			ret = -EACCES;
			goto out_unlock;
		}
L
Linus Torvalds 已提交
1464 1465
	}

S
Stephen Hemminger 已提交
1466 1467
	switch (optname) {
	case MRT_INIT:
1468
		if (optlen != sizeof(int)) {
1469
			ret = -EINVAL;
1470 1471 1472
			break;
		}
		if (rtnl_dereference(mrt->mroute_sk)) {
1473 1474
			ret = -EADDRINUSE;
			break;
1475
		}
S
Stephen Hemminger 已提交
1476 1477 1478

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
1479
			rcu_assign_pointer(mrt->mroute_sk, sk);
1480
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
1481 1482
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_MC_FORWARDING,
1483 1484
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
S
Stephen Hemminger 已提交
1485
		}
1486
		break;
S
Stephen Hemminger 已提交
1487
	case MRT_DONE:
1488 1489 1490 1491
		if (sk != rcu_access_pointer(mrt->mroute_sk)) {
			ret = -EACCES;
		} else {
			ret = ip_ra_control(sk, 0, NULL);
1492
			goto out_unlock;
1493 1494
		}
		break;
S
Stephen Hemminger 已提交
1495 1496
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508
		if (optlen != sizeof(vif)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&vif, optval, sizeof(vif))) {
			ret = -EFAULT;
			break;
		}
		if (vif.vifc_vifi >= MAXVIFS) {
			ret = -ENFILE;
			break;
		}
J
Jianjun Kong 已提交
1509
		if (optname == MRT_ADD_VIF) {
E
Eric Dumazet 已提交
1510 1511
			ret = vif_add(net, mrt, &vif,
				      sk == rtnl_dereference(mrt->mroute_sk));
S
Stephen Hemminger 已提交
1512
		} else {
1513
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
S
Stephen Hemminger 已提交
1514
		}
1515
		break;
1516 1517 1518
	/* Manipulate the forwarding caches. These live
	 * in a sort of kernel/user symbiosis.
	 */
S
Stephen Hemminger 已提交
1519 1520
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
1521
		parent = -1;
1522
		/* fall through */
1523 1524
	case MRT_ADD_MFC_PROXY:
	case MRT_DEL_MFC_PROXY:
1525 1526 1527 1528 1529 1530 1531 1532
		if (optlen != sizeof(mfc)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&mfc, optval, sizeof(mfc))) {
			ret = -EFAULT;
			break;
		}
1533 1534 1535 1536
		if (parent == 0)
			parent = mfc.mfcc_parent;
		if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
			ret = ipmr_mfc_delete(mrt, &mfc, parent);
S
Stephen Hemminger 已提交
1537
		else
E
Eric Dumazet 已提交
1538
			ret = ipmr_mfc_add(net, mrt, &mfc,
1539 1540
					   sk == rtnl_dereference(mrt->mroute_sk),
					   parent);
1541
		break;
1542
	/* Control PIM assert. */
S
Stephen Hemminger 已提交
1543
	case MRT_ASSERT:
1544 1545 1546 1547 1548 1549 1550 1551 1552 1553
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(val, (int __user *)optval)) {
			ret = -EFAULT;
			break;
		}
		mrt->mroute_do_assert = val;
		break;
S
Stephen Hemminger 已提交
1554
	case MRT_PIM:
1555
		if (!ipmr_pimsm_enabled()) {
1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(val, (int __user *)optval)) {
			ret = -EFAULT;
			break;
		}
S
Stephen Hemminger 已提交
1567

1568 1569 1570 1571
		val = !!val;
		if (val != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = val;
			mrt->mroute_do_assert = val;
L
Linus Torvalds 已提交
1572
		}
1573
		break;
1574
	case MRT_TABLE:
1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586
		if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(uval)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(uval, (u32 __user *)optval)) {
			ret = -EFAULT;
			break;
		}
1587

E
Eric Dumazet 已提交
1588 1589 1590
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			ret = -EBUSY;
		} else {
1591
			mrt = ipmr_new_table(net, uval);
1592 1593
			if (IS_ERR(mrt))
				ret = PTR_ERR(mrt);
1594
			else
1595
				raw_sk(sk)->ipmr_table = uval;
E
Eric Dumazet 已提交
1596
		}
1597
		break;
1598
	/* Spurious command, or MRT_VERSION which you cannot set. */
S
Stephen Hemminger 已提交
1599
	default:
1600
		ret = -ENOPROTOOPT;
L
Linus Torvalds 已提交
1601
	}
1602 1603 1604
out_unlock:
	rtnl_unlock();
	return ret;
L
Linus Torvalds 已提交
1605 1606
}

1607
/* Getsock opt support for the multicast routing system. */
J
Jianjun Kong 已提交
1608
int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
L
Linus Torvalds 已提交
1609 1610 1611
{
	int olr;
	int val;
1612
	struct net *net = sock_net(sk);
1613 1614
	struct mr_table *mrt;

1615 1616 1617 1618
	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

1619
	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1620
	if (!mrt)
1621
		return -ENOENT;
L
Linus Torvalds 已提交
1622

1623 1624 1625 1626 1627
	switch (optname) {
	case MRT_VERSION:
		val = 0x0305;
		break;
	case MRT_PIM:
1628
		if (!ipmr_pimsm_enabled())
1629 1630 1631 1632 1633 1634 1635
			return -ENOPROTOOPT;
		val = mrt->mroute_do_pim;
		break;
	case MRT_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
L
Linus Torvalds 已提交
1636
		return -ENOPROTOOPT;
1637
	}
L
Linus Torvalds 已提交
1638 1639 1640 1641 1642 1643

	if (get_user(olr, optlen))
		return -EFAULT;
	olr = min_t(unsigned int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;
J
Jianjun Kong 已提交
1644
	if (put_user(olr, optlen))
L
Linus Torvalds 已提交
1645
		return -EFAULT;
J
Jianjun Kong 已提交
1646
	if (copy_to_user(optval, &val, olr))
L
Linus Torvalds 已提交
1647 1648 1649 1650
		return -EFAULT;
	return 0;
}

1651
/* The IP multicast ioctl support routines. */
L
Linus Torvalds 已提交
1652 1653 1654 1655 1656 1657
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
1658
	struct net *net = sock_net(sk);
1659 1660 1661
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1662
	if (!mrt)
1663
		return -ENOENT;
1664

S
Stephen Hemminger 已提交
1665 1666
	switch (cmd) {
	case SIOCGETVIFCNT:
J
Jianjun Kong 已提交
1667
		if (copy_from_user(&vr, arg, sizeof(vr)))
S
Stephen Hemminger 已提交
1668
			return -EFAULT;
1669
		if (vr.vifi >= mrt->maxvif)
S
Stephen Hemminger 已提交
1670 1671
			return -EINVAL;
		read_lock(&mrt_lock);
1672 1673
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
J
Jianjun Kong 已提交
1674 1675 1676 1677
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
L
Linus Torvalds 已提交
1678 1679
			read_unlock(&mrt_lock);

J
Jianjun Kong 已提交
1680
			if (copy_to_user(arg, &vr, sizeof(vr)))
S
Stephen Hemminger 已提交
1681 1682 1683 1684 1685 1686
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
J
Jianjun Kong 已提交
1687
		if (copy_from_user(&sr, arg, sizeof(sr)))
S
Stephen Hemminger 已提交
1688 1689
			return -EFAULT;

1690
		rcu_read_lock();
1691
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
S
Stephen Hemminger 已提交
1692 1693 1694 1695
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
1696
			rcu_read_unlock();
S
Stephen Hemminger 已提交
1697

J
Jianjun Kong 已提交
1698
			if (copy_to_user(arg, &sr, sizeof(sr)))
S
Stephen Hemminger 已提交
1699 1700 1701
				return -EFAULT;
			return 0;
		}
1702
		rcu_read_unlock();
S
Stephen Hemminger 已提交
1703 1704 1705
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
L
Linus Torvalds 已提交
1706 1707 1708
	}
}

1709 1710 1711 1712 1713 1714 1715 1716 1717
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	struct in_addr src;
	struct in_addr grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

1718 1719 1720 1721 1722 1723 1724 1725
struct compat_sioc_vif_req {
	vifi_t	vifi;		/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

1726 1727
int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
1728
	struct compat_sioc_sg_req sr;
1729 1730
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
1731 1732 1733 1734 1735
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1736
	if (!mrt)
1737 1738 1739
		return -ENOENT;

	switch (cmd) {
1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

L
Linus Torvalds 已提交
1784 1785
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				vif_delete(mrt, ct, 1, NULL);
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};

/* Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed so
 * important for multicast video.
 */
static void ip_encap(struct net *net, struct sk_buff *skb,
		     __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	=	4;
	iph->tos	=	old_iph->tos;
	iph->ttl	=	old_iph->ttl;
	iph->frag_off	=	0;
	iph->daddr	=	daddr;
	iph->saddr	=	saddr;
	iph->protocol	=	IPPROTO_IPIP;
	iph->ihl	=	5;
	iph->tot_len	=	htons(skb->len);
	ip_select_ident(net, skb, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}

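/* Tail of the forwarding path, invoked from the NF_INET_FORWARD hook:
 * update forwarding statistics, re-insert IP options if present and
 * hand the packet to dst_output().
 */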
static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
				      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(net, sk, skb);
}

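/* Skip the software forwarding path when the packet was already
 * forwarded in hardware: both VIFs must sit behind the same switchdev
 * ASIC and the skb must carry the offload forwarding mark.
 */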
#ifdef CONFIG_NET_SWITCHDEV
static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
				   int in_vifi, int out_vifi)
{
	struct vif_device *out_vif = &mrt->vif_table[out_vifi];
	struct vif_device *in_vif = &mrt->vif_table[in_vifi];

	if (!skb->offload_mr_fwd_mark)
		return false;
	if (!out_vif->dev_parent_id.id_len || !in_vif->dev_parent_id.id_len)
		return false;
	return netdev_phys_item_id_same(&out_vif->dev_parent_id,
					&in_vif->dev_parent_id);
}
#else
static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
				   int in_vifi, int out_vifi)
{
	return false;
}
#endif

/* Processing handlers for ipmr_forward */

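/* Transmit one copy of the packet on VIF @vifi: account the traffic,
 * report to the daemon for register VIFs, resolve an output route
 * (IPIP-encapsulating for tunnel VIFs) and run the FORWARD netfilter
 * hook before handing the skb to ipmr_forward_finish().
 */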
static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    int in_vifi, struct sk_buff *skb,
			    struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	struct flowi4 fl4;
	int    encap = 0;

	if (!vif->dev)
		goto out_free;

	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
		goto out_free;
	}

	if (ipmr_forward_offloaded(skb, mrt, in_vifi, vifi))
		goto out_free;

	if (vif->flags & VIFF_TUNNEL) {
		rt = ip_route_output_ports(net, &fl4, NULL,
					   vif->remote, vif->local,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
	}

	dev = rt->dst.dev;

	if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		 * allow sending ICMP here, so the packets will simply
		 * disappear into a black hole.
		 */
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out += skb->len;

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR
	 */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(net, skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/* RFC 1584 teaches that a DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but also after forwarding on all output
	 * interfaces. Clearly, if the mrouter runs a multicasting program,
	 * it should receive packets regardless of which interface the
	 * program joined on.
	 * If we did not do this, the program would have to join on all
	 * interfaces. On the other hand, a multihomed host (or router, but
	 * not mrouter) cannot join on more than one interface - it would
	 * result in receiving multiple packets.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
		net, NULL, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
}

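/* Return the index of the VIF backed by @dev, or -1 if the device
 * has no VIF in this table.
 */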
static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif-1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}

/* "local" means that we should preserve one skb (for local delivery) */
static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct net_device *dev, struct sk_buff *skb,
			  struct mfc_cache *cache, int local)
{
	int true_vifi = ipmr_find_vif(mrt, dev);
	int psend = -1;
	int vif, ct;

	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;
	cache->mfc_un.res.lastuse = jiffies;

	if (cache->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
		struct mfc_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		cache_proxy = ipmr_cache_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
			goto forward;
	}

	/* Wrong interface: drop packet and (maybe) send PIM assert. */
	if (mrt->vif_table[vif].dev != dev) {
		if (rt_is_output_route(skb_rtable(skb))) {
			/* It is our own packet, looped back.
			 * Very complicated situation...
			 *
			 * The best workaround until routing daemons are
			 * fixed is not to redistribute a packet if it was
			 * sent through the wrong interface. It means that
			 * multicast applications WILL NOT work for (S,G)
			 * entries whose default multicast route points to
			 * the wrong oif. In any case, it is not a good
			 * idea to use multicasting applications on a router.
			 */
			goto dont_forward;
		}

		cache->mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts when switching from RPT to SPT,
		     * so we cannot check that the packet arrived on an oif.
		     * It is bad, but otherwise we would need to move a
		     * pretty large chunk of pimd into the kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
		}
		goto dont_forward;
	}

forward:
	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	/* Forward the frame */
	if (cache->mfc_origin == htonl(INADDR_ANY) &&
	    cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
		if (true_vifi >= 0 &&
		    true_vifi != cache->mfc_parent &&
		    ip_hdr(skb)->ttl >
				cache->mfc_un.res.ttls[cache->mfc_parent]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
			 */
			psend = cache->mfc_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = cache->mfc_un.res.maxvif - 1;
	     ct >= cache->mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((cache->mfc_origin != htonl(INADDR_ANY) ||
		     ct != true_vifi) &&
		    ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
E
Eric Dumazet 已提交
2079

L
Linus Torvalds 已提交
2080
				if (skb2)
2081 2082
					ipmr_queue_xmit(net, mrt, true_vifi,
							skb2, cache, psend);
L
Linus Torvalds 已提交
2083
			}
J
Jianjun Kong 已提交
2084
			psend = ct;
L
Linus Torvalds 已提交
2085 2086
		}
	}
2087
last_forward:
L
Linus Torvalds 已提交
2088 2089 2090
	if (psend != -1) {
		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
E
Eric Dumazet 已提交
2091

L
Linus Torvalds 已提交
2092
			if (skb2)
2093 2094
				ipmr_queue_xmit(net, mrt, true_vifi, skb2,
						cache, psend);
L
Linus Torvalds 已提交
2095
		} else {
2096
			ipmr_queue_xmit(net, mrt, true_vifi, skb, cache, psend);
2097
			return;
L
Linus Torvalds 已提交
2098 2099 2100 2101 2102 2103 2104 2105
		}
	}

dont_forward:
	if (!local)
		kfree_skb(skb);
}

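/* Build a flow key from the skb's addresses and routing information
 * and resolve the mr_table that should handle it.
 */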
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
2107
{
2108 2109
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph = ip_hdr(skb);
D
David S. Miller 已提交
2110
	struct flowi4 fl4 = {
2111 2112
		.daddr = iph->daddr,
		.saddr = iph->saddr,
2113
		.flowi4_tos = RT_TOS(iph->tos),
D
David S. Miller 已提交
2114 2115 2116
		.flowi4_oif = (rt_is_output_route(rt) ?
			       skb->dev->ifindex : 0),
		.flowi4_iif = (rt_is_output_route(rt) ?
2117
			       LOOPBACK_IFINDEX :
D
David S. Miller 已提交
2118
			       skb->dev->ifindex),
2119
		.flowi4_mark = skb->mark,
2120 2121 2122 2123
	};
	struct mr_table *mrt;
	int err;

D
David S. Miller 已提交
2124
	err = ipmr_fib_lookup(net, &fl4, &mrt);
2125 2126 2127 2128
	if (err)
		return ERR_PTR(err);
	return mrt;
}

/* Multicast packets for forwarding arrive here
 * Called with rcu_read_lock();
 */
int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	struct net *net = dev_net(skb->dev);
	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
	struct mr_table *mrt;
	struct net_device *dev;

	/* skb->dev passed in is the loX master dev for vrfs.
	 * As there are no vifs associated with loopback devices,
	 * get the proper interface that does have a vif associated with it.
	 */
	dev = skb->dev;
	if (netif_is_l3_master(skb->dev)) {
		dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
		if (!dev) {
			kfree_skb(skb);
			return -ENODEV;
		}
	}

	/* Packet is looped back after forward, it should not be
	 * forwarded a second time, but it can still be delivered locally.
	 */
	if (IPCB(skb)->flags & IPSKB_FORWARDED)
		goto dont_forward;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt)) {
		kfree_skb(skb);
		return PTR_ERR(mrt);
	}
	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
			/* IGMPv1 (and broken IGMPv2 implementations such as
			 * Cisco IOS <= 11.2(8)) do not put the router alert
			 * option into IGMP packets destined for routable
			 * groups. It is very bad, because it means
			 * that we can forward NO IGMP messages.
			 */
			struct sock *mroute_sk;

			mroute_sk = rcu_dereference(mrt->mroute_sk);
			if (mroute_sk) {
				nf_reset(skb);
				raw_rcv(mroute_sk, skb);
				return 0;
			}
		}
	}

	/* already under rcu_read_lock() */
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
	if (!cache) {
		int vif = ipmr_find_vif(mrt, dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
						    vif);
	}

	/* No usable cache entry */
	if (!cache) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (!skb2)
				return -ENOBUFS;
			skb = skb2;
		}

		read_lock(&mrt_lock);
		vif = ipmr_find_vif(mrt, dev);
		if (vif >= 0) {
			int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev);
			read_unlock(&mrt_lock);

			return err2;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	read_lock(&mrt_lock);
	ip_mr_forward(net, mrt, dev, skb, cache, local);
	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}

#ifdef CONFIG_IP_PIMSM_V1
/* Handle IGMP messages of PIMv1 */
int pim_rcv_v1(struct sk_buff *skb)
{
	struct igmphdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = igmp_hdr(skb);

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (!mrt->mroute_do_pim ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif

#ifdef CONFIG_IP_PIMSM_V2
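/* Handle PIMv2 Register messages, delivered via the pim_protocol
 * net_protocol handler registered in ip_mr_init().
 */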
static int pim_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | (PIM_TYPE_REGISTER)) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif

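/* Fill the attributes describing one MFC entry (input interface,
 * output interface list and statistics) into an rtnetlink message.
 */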
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm)
{
	struct rta_mfc_stats mfcs;
	struct nlattr *mp_attr;
	struct rtnexthop *nhp;
	unsigned long lastuse;
	int ct;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS) {
		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
		return -ENOENT;
	}

	if (VIF_EXISTS(mrt, c->mfc_parent) &&
	    nla_put_u32(skb, RTA_IIF,
			mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
		return -EMSGSIZE;

	if (c->mfc_flags & MFC_OFFLOAD)
		rtm->rtm_flags |= RTNH_F_OFFLOAD;

	if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			struct vif_device *vif;

			if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			vif = &mrt->vif_table[ct];
			nhp->rtnh_ifindex = vif->dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
			      RTA_PAD))
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}

int ipmr_get_route(struct net *net, struct sk_buff *skb,
		   __be32 saddr, __be32 daddr,
		   struct rtmsg *rtm, u32 portid)
{
	struct mfc_cache *cache;
	struct mr_table *mrt;
	int err;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	rcu_read_lock();
	cache = ipmr_cache_find(mrt, saddr, daddr);
	if (!cache && skb->dev) {
		int vif = ipmr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, daddr, vif);
	}
	if (!cache) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif = -1;

		dev = skb->dev;
		read_lock(&mrt_lock);
		if (dev)
			vif = ipmr_find_vif(mrt, dev);
		if (vif < 0) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENODEV;
		}
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENOMEM;
		}

		NETLINK_CB(skb2).portid = portid;
		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = saddr;
		iph->daddr = daddr;
		iph->version = 0;
		err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
		read_unlock(&mrt_lock);
		rcu_read_unlock();
		return err;
	}

	read_lock(&mrt_lock);
	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	rcu_read_unlock();
	return err;
}
2417
static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2418 2419
			    u32 portid, u32 seq, struct mfc_cache *c, int cmd,
			    int flags)
2420 2421 2422
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
2423
	int err;
2424

2425
	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2426
	if (!nlh)
2427 2428 2429 2430 2431 2432 2433 2434
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IPMR;
	rtm->rtm_dst_len  = 32;
	rtm->rtm_src_len  = 32;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
D
David S. Miller 已提交
2435 2436
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
2437 2438
	rtm->rtm_type     = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
2439 2440 2441 2442
	if (c->mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
2443 2444
	rtm->rtm_flags    = 0;

2445 2446
	if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
	    nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
D
David S. Miller 已提交
2447
		goto nla_put_failure;
2448 2449 2450
	err = __ipmr_fill_mroute(mrt, skb, c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
2451 2452
		goto nla_put_failure;

2453 2454
	nlmsg_end(skb, nlh);
	return 0;
2455 2456 2457 2458 2459 2460

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475
static size_t mroute_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)	/* RTA_TABLE */
		+ nla_total_size(4)	/* RTA_SRC */
		+ nla_total_size(4)	/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)	/* RTA_IIF */
		      + nla_total_size(0)	/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
						/* RTA_MFC_STATS */
2476
		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490
		;

	return len;
}

static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
			GFP_ATOMIC);
2491
	if (!skb)
2492 2493
		goto errout;

2494
	err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
}

2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569
static size_t igmpmsg_netlink_msgsize(size_t payloadlen)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtgenmsg))
		+ nla_total_size(1)	/* IPMRA_CREPORT_MSGTYPE */
		+ nla_total_size(4)	/* IPMRA_CREPORT_VIF_ID */
		+ nla_total_size(4)	/* IPMRA_CREPORT_SRC_ADDR */
		+ nla_total_size(4)	/* IPMRA_CREPORT_DST_ADDR */
					/* IPMRA_CREPORT_PKT */
		+ nla_total_size(payloadlen)
		;

	return len;
}

static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
{
	struct net *net = read_pnet(&mrt->net);
	struct nlmsghdr *nlh;
	struct rtgenmsg *rtgenm;
	struct igmpmsg *msg;
	struct sk_buff *skb;
	struct nlattr *nla;
	int payloadlen;

	payloadlen = pkt->len - sizeof(struct igmpmsg);
	msg = (struct igmpmsg *)skb_network_header(pkt);

	skb = nlmsg_new(igmpmsg_netlink_msgsize(payloadlen), GFP_ATOMIC);
	if (!skb)
		goto errout;

	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
			sizeof(struct rtgenmsg), 0);
	if (!nlh)
		goto errout;
	rtgenm = nlmsg_data(nlh);
	rtgenm->rtgen_family = RTNL_FAMILY_IPMR;
	if (nla_put_u8(skb, IPMRA_CREPORT_MSGTYPE, msg->im_msgtype) ||
	    nla_put_u32(skb, IPMRA_CREPORT_VIF_ID, msg->im_vif) ||
	    nla_put_in_addr(skb, IPMRA_CREPORT_SRC_ADDR,
			    msg->im_src.s_addr) ||
	    nla_put_in_addr(skb, IPMRA_CREPORT_DST_ADDR,
			    msg->im_dst.s_addr))
		goto nla_put_failure;

	nla = nla_reserve(skb, IPMRA_CREPORT_PKT, payloadlen);
	if (!nla || skb_copy_bits(pkt, sizeof(struct igmpmsg),
				  nla_data(nla), payloadlen))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE_R, NULL, GFP_ATOMIC);
	return;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
errout:
	kfree_skb(skb);
	rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE_R, -ENOBUFS);
}

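/* RTM_GETROUTE handler: look up a single (S,G) entry in the requested
 * table and unicast it back to the requester.
 */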
static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX + 1];
	struct sk_buff *skb = NULL;
	struct mfc_cache *cache;
	struct mr_table *mrt;
	struct rtmsg *rtm;
	__be32 src, grp;
	u32 tableid;
	int err;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX,
			  rtm_ipv4_policy, extack);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
	grp = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
	tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0;

	mrt = ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT);
2595 2596
	if (!mrt) {
		err = -ENOENT;
D
Donald Sharp 已提交
2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630
		goto errout_free;
	}

	/* entries are added/deleted only under RTNL */
	rcu_read_lock();
	cache = ipmr_cache_find(mrt, src, grp);
	rcu_read_unlock();
	if (!cache) {
		err = -ENOENT;
		goto errout_free;
	}

	skb = nlmsg_new(mroute_msgsize(false, mrt->maxvif), GFP_KERNEL);
	if (!skb) {
		err = -ENOBUFS;
		goto errout_free;
	}

	err = ipmr_fill_mroute(mrt, skb, NETLINK_CB(in_skb).portid,
			       nlh->nlmsg_seq, cache,
			       RTM_NEWROUTE, 0);
	if (err < 0)
		goto errout_free;

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);

errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}

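/* Dump all MFC entries of every table: resolved entries under RCU,
 * unresolved ones under the mfc_unres_lock spinlock.
 */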
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mfc_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
2640
	s_e = cb->args[1];
2641

2642
	rcu_read_lock();
2643 2644 2645
	ipmr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
2646 2647 2648 2649 2650 2651 2652 2653 2654
		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
			if (e < s_e)
				goto next_entry;
			if (ipmr_fill_mroute(mrt, skb,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     mfc, RTM_NEWROUTE,
					     NLM_F_MULTI) < 0)
				goto done;
2655
next_entry:
2656
			e++;
2657
		}
2658 2659 2660
		e = 0;
		s_e = 0;

2661 2662 2663 2664 2665 2666 2667
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (ipmr_fill_mroute(mrt, skb,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
2668 2669
					     mfc, RTM_NEWROUTE,
					     NLM_F_MULTI) < 0) {
2670 2671 2672 2673 2674 2675 2676
				spin_unlock_bh(&mfc_unres_lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(&mfc_unres_lock);
2677 2678
		e = 0;
		s_e = 0;
2679 2680 2681 2682
next_table:
		t++;
	}
done:
2683
	rcu_read_unlock();
2684

2685
	cb->args[1] = e;
2686 2687 2688 2689 2690
	cb->args[0] = t;

	return skb->len;
}

2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726
static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = {
	[RTA_SRC]	= { .type = NLA_U32 },
	[RTA_DST]	= { .type = NLA_U32 },
	[RTA_IIF]	= { .type = NLA_U32 },
	[RTA_TABLE]	= { .type = NLA_U32 },
	[RTA_MULTIPATH]	= { .len = sizeof(struct rtnexthop) },
};

static bool ipmr_rtm_validate_proto(unsigned char rtm_protocol)
{
	switch (rtm_protocol) {
	case RTPROT_STATIC:
	case RTPROT_MROUTED:
		return true;
	}
	return false;
}

static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc)
{
	struct rtnexthop *rtnh = nla_data(nla);
	int remaining = nla_len(nla), vifi = 0;

	while (rtnh_ok(rtnh, remaining)) {
		mfcc->mfcc_ttls[vifi] = rtnh->rtnh_hops;
		if (++vifi == MAXVIFS)
			break;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	return remaining > 0 ? -EINVAL : vifi;
}

/* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */
static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
			    struct mfcctl *mfcc, int *mrtsock,
2727 2728
			    struct mr_table **mrtret,
			    struct netlink_ext_ack *extack)
2729 2730 2731 2732 2733 2734 2735 2736
{
	struct net_device *dev = NULL;
	u32 tblid = RT_TABLE_DEFAULT;
	struct mr_table *mrt;
	struct nlattr *attr;
	struct rtmsg *rtm;
	int ret, rem;

2737
	ret = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipmr_policy,
2738
			     extack);
2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796
	if (ret < 0)
		goto out;
	rtm = nlmsg_data(nlh);

	ret = -EINVAL;
	if (rtm->rtm_family != RTNL_FAMILY_IPMR || rtm->rtm_dst_len != 32 ||
	    rtm->rtm_type != RTN_MULTICAST ||
	    rtm->rtm_scope != RT_SCOPE_UNIVERSE ||
	    !ipmr_rtm_validate_proto(rtm->rtm_protocol))
		goto out;

	memset(mfcc, 0, sizeof(*mfcc));
	mfcc->mfcc_parent = -1;
	ret = 0;
	nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), rem) {
		switch (nla_type(attr)) {
		case RTA_SRC:
			mfcc->mfcc_origin.s_addr = nla_get_be32(attr);
			break;
		case RTA_DST:
			mfcc->mfcc_mcastgrp.s_addr = nla_get_be32(attr);
			break;
		case RTA_IIF:
			dev = __dev_get_by_index(net, nla_get_u32(attr));
			if (!dev) {
				ret = -ENODEV;
				goto out;
			}
			break;
		case RTA_MULTIPATH:
			if (ipmr_nla_get_ttls(attr, mfcc) < 0) {
				ret = -EINVAL;
				goto out;
			}
			break;
		case RTA_PREFSRC:
			ret = 1;
			break;
		case RTA_TABLE:
			tblid = nla_get_u32(attr);
			break;
		}
	}
	mrt = ipmr_get_table(net, tblid);
	if (!mrt) {
		ret = -ENOENT;
		goto out;
	}
	*mrtret = mrt;
	*mrtsock = rtm->rtm_protocol == RTPROT_MROUTED ? 1 : 0;
	if (dev)
		mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);

out:
	return ret;
}

/* takes care of both newroute and delroute */
2797 2798
static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
2799 2800 2801 2802 2803 2804 2805 2806
{
	struct net *net = sock_net(skb->sk);
	int ret, mrtsock, parent;
	struct mr_table *tbl;
	struct mfcctl mfcc;

	mrtsock = 0;
	tbl = NULL;
2807
	ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl, extack);
2808 2809 2810 2811 2812 2813 2814 2815 2816 2817
	if (ret < 0)
		return ret;

	parent = ret ? mfcc.mfcc_parent : -1;
	if (nlh->nlmsg_type == RTM_NEWROUTE)
		return ipmr_mfc_add(net, tbl, &mfcc, mrtsock, parent);
	else
		return ipmr_mfc_delete(tbl, &mfcc, parent);
}

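/* RTM_GETLINK dump support for RTNL_FAMILY_IPMR: per-table attributes
 * plus one nested IPMRA_VIF block for each VIF.
 */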
static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
{
	u32 queue_len = atomic_read(&mrt->cache_resolve_queue_len);

	if (nla_put_u32(skb, IPMRA_TABLE_ID, mrt->id) ||
	    nla_put_u32(skb, IPMRA_TABLE_CACHE_RES_QUEUE_LEN, queue_len) ||
	    nla_put_s32(skb, IPMRA_TABLE_MROUTE_REG_VIF_NUM,
			mrt->mroute_reg_vif_num) ||
	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_ASSERT,
		       mrt->mroute_do_assert) ||
	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim))
		return false;

	return true;
}

static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
{
	struct nlattr *vif_nest;
	struct vif_device *vif;

	/* if the VIF doesn't exist just continue */
	if (!VIF_EXISTS(mrt, vifid))
		return true;

	vif = &mrt->vif_table[vifid];
	vif_nest = nla_nest_start(skb, IPMRA_VIF);
	if (!vif_nest)
		return false;
	if (nla_put_u32(skb, IPMRA_VIFA_IFINDEX, vif->dev->ifindex) ||
	    nla_put_u32(skb, IPMRA_VIFA_VIF_ID, vifid) ||
	    nla_put_u16(skb, IPMRA_VIFA_FLAGS, vif->flags) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_IN, vif->bytes_in,
			      IPMRA_VIFA_PAD) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_OUT, vif->bytes_out,
			      IPMRA_VIFA_PAD) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_IN, vif->pkt_in,
			      IPMRA_VIFA_PAD) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_OUT, vif->pkt_out,
			      IPMRA_VIFA_PAD) ||
	    nla_put_be32(skb, IPMRA_VIFA_LOCAL_ADDR, vif->local) ||
	    nla_put_be32(skb, IPMRA_VIFA_REMOTE_ADDR, vif->remote)) {
		nla_nest_cancel(skb, vif_nest);
		return false;
	}
	nla_nest_end(skb, vif_nest);

	return true;
}

static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	unsigned int t = 0, s_t;
	unsigned int e = 0, s_e;
	struct mr_table *mrt;

	s_t = cb->args[0];
	s_e = cb->args[1];

	ipmr_for_each_table(mrt, net) {
		struct nlattr *vifs, *af;
		struct ifinfomsg *hdr;
		u32 i;

		if (t < s_t)
			goto skip_table;
		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, RTM_NEWLINK,
				sizeof(*hdr), NLM_F_MULTI);
		if (!nlh)
			break;

		hdr = nlmsg_data(nlh);
		memset(hdr, 0, sizeof(*hdr));
		hdr->ifi_family = RTNL_FAMILY_IPMR;

		af = nla_nest_start(skb, IFLA_AF_SPEC);
		if (!af) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}

		if (!ipmr_fill_table(mrt, skb)) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}

		vifs = nla_nest_start(skb, IPMRA_TABLE_VIFS);
		if (!vifs) {
			nla_nest_end(skb, af);
			nlmsg_end(skb, nlh);
			goto out;
		}
		for (i = 0; i < mrt->maxvif; i++) {
			if (e < s_e)
				goto skip_entry;
			if (!ipmr_fill_vif(mrt, i, skb)) {
				nla_nest_end(skb, vifs);
				nla_nest_end(skb, af);
				nlmsg_end(skb, nlh);
				goto out;
			}
skip_entry:
			e++;
		}
		s_e = 0;
		e = 0;
		nla_nest_end(skb, vifs);
		nla_nest_end(skb, af);
		nlmsg_end(skb, nlh);
skip_table:
		t++;
	}

out:
	cb->args[1] = e;
	cb->args[0] = t;

	return skb->len;
}

2941
#ifdef CONFIG_PROC_FS
2942 2943
/* The /proc interfaces to multicast routing :
 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
L
Linus Torvalds 已提交
2944 2945
 */
struct ipmr_vif_iter {
2946
	struct seq_net_private p;
2947
	struct mr_table *mrt;
L
Linus Torvalds 已提交
2948 2949 2950
	int ct;
};

2951
static struct vif_device *ipmr_vif_seq_idx(struct net *net,
2952 2953
					    struct ipmr_vif_iter *iter,
					    loff_t pos)
L
Linus Torvalds 已提交
2954
{
2955
	struct mr_table *mrt = iter->mrt;
2956 2957 2958

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
L
Linus Torvalds 已提交
2959
			continue;
2960
		if (pos-- == 0)
2961
			return &mrt->vif_table[iter->ct];
L
Linus Torvalds 已提交
2962 2963 2964 2965 2966
	}
	return NULL;
}

static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
S
Stephen Hemminger 已提交
2967
	__acquires(mrt_lock)
L
Linus Torvalds 已提交
2968
{
2969
	struct ipmr_vif_iter *iter = seq->private;
2970
	struct net *net = seq_file_net(seq);
2971 2972 2973
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2974
	if (!mrt)
2975 2976 2977
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;
2978

L
Linus Torvalds 已提交
2979
	read_lock(&mrt_lock);
2980
	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
L
Linus Torvalds 已提交
2981 2982 2983 2984 2985 2986
		: SEQ_START_TOKEN;
}

static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
2987
	struct net *net = seq_file_net(seq);
2988
	struct mr_table *mrt = iter->mrt;
L
Linus Torvalds 已提交
2989 2990 2991

	++*pos;
	if (v == SEQ_START_TOKEN)
2992
		return ipmr_vif_seq_idx(net, iter, 0);
2993

2994 2995
	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
L
Linus Torvalds 已提交
2996
			continue;
2997
		return &mrt->vif_table[iter->ct];
L
Linus Torvalds 已提交
2998 2999 3000 3001 3002
	}
	return NULL;
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
S
Stephen Hemminger 已提交
3003
	__releases(mrt_lock)
L
Linus Torvalds 已提交
3004 3005 3006 3007 3008 3009
{
	read_unlock(&mrt_lock);
}

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
3010 3011
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;
3012

L
Linus Torvalds 已提交
3013
	if (v == SEQ_START_TOKEN) {
3014
		seq_puts(seq,
L
Linus Torvalds 已提交
3015 3016 3017
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
3018 3019
		const char *name =  vif->dev ?
				    vif->dev->name : "none";
L
Linus Torvalds 已提交
3020 3021

		seq_printf(seq,
3022
			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
3023
			   vif - mrt->vif_table,
3024
			   name, vif->bytes_in, vif->pkt_in,
L
Linus Torvalds 已提交
3025 3026 3027 3028 3029 3030
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}

3031
static const struct seq_operations ipmr_vif_seq_ops = {
L
Linus Torvalds 已提交
3032 3033 3034 3035 3036 3037 3038 3039
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
3040 3041
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
L
Linus Torvalds 已提交
3042 3043
}

3044
static const struct file_operations ipmr_vif_fops = {
L
Linus Torvalds 已提交
3045 3046 3047
	.open    = ipmr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
3048
	.release = seq_release_net,
L
Linus Torvalds 已提交
3049 3050 3051
};

struct ipmr_mfc_iter {
3052
	struct seq_net_private p;
3053
	struct mr_table *mrt;
3054
	struct list_head *cache;
L
Linus Torvalds 已提交
3055 3056
};

3057 3058
static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					  struct ipmr_mfc_iter *it, loff_t pos)
L
Linus Torvalds 已提交
3059
{
3060
	struct mr_table *mrt = it->mrt;
L
Linus Torvalds 已提交
3061 3062
	struct mfc_cache *mfc;

3063
	rcu_read_lock();
3064 3065 3066 3067
	it->cache = &mrt->mfc_cache_list;
	list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
		if (pos-- == 0)
			return mfc;
3068
	rcu_read_unlock();
L
Linus Torvalds 已提交
3069 3070

	spin_lock_bh(&mfc_unres_lock);
3071
	it->cache = &mrt->mfc_unres_queue;
3072
	list_for_each_entry(mfc, it->cache, list)
3073
		if (pos-- == 0)
L
Linus Torvalds 已提交
3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}


static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
3085
	struct net *net = seq_file_net(seq);
3086
	struct mr_table *mrt;
3087

3088
	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
3089
	if (!mrt)
3090
		return ERR_PTR(-ENOENT);
3091

3092
	it->mrt = mrt;
L
Linus Torvalds 已提交
3093
	it->cache = NULL;
3094
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
L
Linus Torvalds 已提交
3095 3096 3097 3098 3099 3100
		: SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
3101
	struct net *net = seq_file_net(seq);
3102
	struct mr_table *mrt = it->mrt;
3103
	struct mfc_cache *mfc = v;
L
Linus Torvalds 已提交
3104 3105 3106 3107

	++*pos;

	if (v == SEQ_START_TOKEN)
3108
		return ipmr_mfc_seq_idx(net, seq->private, 0);
L
Linus Torvalds 已提交
3109

3110 3111
	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);
3112

3113
	if (it->cache == &mrt->mfc_unres_queue)
L
Linus Torvalds 已提交
3114 3115 3116
		goto end_of_list;

	/* exhausted cache_array, show unresolved */
3117
	rcu_read_unlock();
3118
	it->cache = &mrt->mfc_unres_queue;
3119

L
Linus Torvalds 已提交
3120
	spin_lock_bh(&mfc_unres_lock);
3121 3122
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);
L
Linus Torvalds 已提交
3123

E
Eric Dumazet 已提交
3124
end_of_list:
L
Linus Torvalds 已提交
3125 3126 3127 3128 3129 3130 3131 3132 3133
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
3134
	struct mr_table *mrt = it->mrt;
L
Linus Torvalds 已提交
3135

3136
	if (it->cache == &mrt->mfc_unres_queue)
L
Linus Torvalds 已提交
3137
		spin_unlock_bh(&mfc_unres_lock);
3138
	else if (it->cache == &mrt->mfc_cache_list)
3139
		rcu_read_unlock();
L
Linus Torvalds 已提交
3140 3141 3142 3143 3144 3145 3146
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
3147
		seq_puts(seq,
L
Linus Torvalds 已提交
3148 3149 3150 3151
		 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
3152
		const struct mr_table *mrt = it->mrt;
3153

3154 3155 3156
		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
3157
			   mfc->mfc_parent);
L
Linus Torvalds 已提交
3158

3159
		if (it->cache != &mrt->mfc_unres_queue) {
3160 3161 3162 3163
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
S
Stephen Hemminger 已提交
3164
			for (n = mfc->mfc_un.res.minvif;
E
Eric Dumazet 已提交
3165
			     n < mfc->mfc_un.res.maxvif; n++) {
3166
				if (VIF_EXISTS(mrt, n) &&
3167 3168
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
3169
					   " %2d:%-3d",
L
Linus Torvalds 已提交
3170 3171
					   n, mfc->mfc_un.res.ttls[n]);
			}
3172 3173 3174 3175 3176
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
L
Linus Torvalds 已提交
3177 3178 3179 3180 3181 3182
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

3183
static const struct seq_operations ipmr_mfc_seq_ops = {
L
Linus Torvalds 已提交
3184 3185 3186 3187 3188 3189 3190 3191
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
3192 3193
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
L
Linus Torvalds 已提交
3194 3195
}

3196
static const struct file_operations ipmr_mfc_fops = {
L
Linus Torvalds 已提交
3197 3198 3199
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
3200
	.release = seq_release_net,
L
Linus Torvalds 已提交
3201
};
3202
#endif
L
Linus Torvalds 已提交
3203 3204

#ifdef CONFIG_IP_PIMSM_V2
3205
static const struct net_protocol pim_protocol = {
L
Linus Torvalds 已提交
3206
	.handler	=	pim_rcv,
T
Tom Goff 已提交
3207
	.netns_ok	=	1,
L
Linus Torvalds 已提交
3208 3209 3210
};
#endif

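/* FIB notifier support: the sequence counter guards against missed
 * events, and ipmr_dump() replays the current VIF and MFC state to a
 * newly registered notifier.
 */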
static unsigned int ipmr_seq_read(struct net *net)
{
	ASSERT_RTNL();

	return net->ipv4.ipmr_seq + ipmr_rules_seq_read(net);
}

static int ipmr_dump(struct net *net, struct notifier_block *nb)
{
	struct mr_table *mrt;
	int err;

	err = ipmr_rules_dump(net, nb);
	if (err)
		return err;

	ipmr_for_each_table(mrt, net) {
		struct vif_device *v = &mrt->vif_table[0];
		struct mfc_cache *mfc;
		int vifi;

		/* Notify on table VIF entries */
		read_lock(&mrt_lock);
		for (vifi = 0; vifi < mrt->maxvif; vifi++, v++) {
			if (!v->dev)
				continue;

			call_ipmr_vif_entry_notifier(nb, net, FIB_EVENT_VIF_ADD,
						     v, vifi, mrt->id);
		}
		read_unlock(&mrt_lock);

		/* Notify on table MFC entries */
		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
			call_ipmr_mfc_entry_notifier(nb, net,
						     FIB_EVENT_ENTRY_ADD, mfc,
						     mrt->id);
	}

	return 0;
}

static const struct fib_notifier_ops ipmr_notifier_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.fib_seq_read	= ipmr_seq_read,
	.fib_dump	= ipmr_dump,
	.owner		= THIS_MODULE,
};

3260
static int __net_init ipmr_notifier_init(struct net *net)
3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279
{
	struct fib_notifier_ops *ops;

	net->ipv4.ipmr_seq = 0;

	ops = fib_notifier_ops_register(&ipmr_notifier_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);
	net->ipv4.ipmr_notifier_ops = ops;

	return 0;
}

static void __net_exit ipmr_notifier_exit(struct net *net)
{
	fib_notifier_ops_unregister(net->ipv4.ipmr_notifier_ops);
	net->ipv4.ipmr_notifier_ops = NULL;
}

3280
/* Setup for IP multicast routing */
3281 3282
static int __net_init ipmr_net_init(struct net *net)
{
3283
	int err;
3284

3285 3286 3287 3288
	err = ipmr_notifier_init(net);
	if (err)
		goto ipmr_notifier_fail;

3289 3290
	err = ipmr_rules_init(net);
	if (err < 0)
3291
		goto ipmr_rules_fail;
3292 3293 3294

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
3295
	if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
3296
		goto proc_vif_fail;
3297
	if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
3298 3299
		goto proc_cache_fail;
#endif
3300 3301
	return 0;

3302 3303
#ifdef CONFIG_PROC_FS
proc_cache_fail:
3304
	remove_proc_entry("ip_mr_vif", net->proc_net);
3305
proc_vif_fail:
3306
	ipmr_rules_exit(net);
3307
#endif
3308 3309 3310
ipmr_rules_fail:
	ipmr_notifier_exit(net);
ipmr_notifier_fail:
3311 3312 3313 3314 3315
	return err;
}

static void __net_exit ipmr_net_exit(struct net *net)
{
3316
#ifdef CONFIG_PROC_FS
3317 3318
	remove_proc_entry("ip_mr_cache", net->proc_net);
	remove_proc_entry("ip_mr_vif", net->proc_net);
3319
#endif
3320
	ipmr_notifier_exit(net);
3321
	ipmr_rules_exit(net);
3322 3323 3324 3325 3326
}

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
3327
	.async = true,
3328
};
3329

W
Wang Chen 已提交
3330
int __init ip_mr_init(void)
L
Linus Torvalds 已提交
3331
{
W
Wang Chen 已提交
3332 3333
	int err;

L
Linus Torvalds 已提交
3334 3335
	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
3336
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
3337
				       NULL);
W
Wang Chen 已提交
3338

3339 3340 3341 3342
	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

W
Wang Chen 已提交
3343 3344 3345
	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
T
Tom Goff 已提交
3346 3347
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
J
Joe Perches 已提交
3348
		pr_err("%s: can't add PIM protocol\n", __func__);
T
Tom Goff 已提交
3349 3350 3351 3352
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
3353
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
3354
		      ipmr_rtm_getroute, ipmr_rtm_dumproute, 0);
3355
	rtnl_register(RTNL_FAMILY_IPMR, RTM_NEWROUTE,
3356
		      ipmr_rtm_route, NULL, 0);
3357
	rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE,
3358
		      ipmr_rtm_route, NULL, 0);
3359 3360

	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETLINK,
3361
		      NULL, ipmr_rtm_dumplink, 0);
W
Wang Chen 已提交
3362
	return 0;
3363

T
Tom Goff 已提交
3364 3365 3366 3367
#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
B
Benjamin Thery 已提交
3368
reg_notif_fail:
3369 3370
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
B
Benjamin Thery 已提交
3371
	kmem_cache_destroy(mrt_cachep);
W
Wang Chen 已提交
3372
	return err;
L
Linus Torvalds 已提交
3373
}