// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux INET6 implementation
 *	FIB front-end.
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 */

/*	Changes:
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		reachable.  otherwise, round-robin the list.
 *	Ville Nuorvala
 *		Fixed routing subtrees.
 */

23 24
#define pr_fmt(fmt) "IPv6: " fmt

25
#include <linux/capability.h>
L
Linus Torvalds 已提交
26
#include <linux/errno.h>
27
#include <linux/export.h>
L
Linus Torvalds 已提交
28 29 30 31 32 33 34 35
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
36
#include <linux/mroute6.h>
L
Linus Torvalds 已提交
37 38 39 40
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
41
#include <linux/nsproxy.h>
42
#include <linux/slab.h>
43
#include <linux/jhash.h>
44
#include <net/net_namespace.h>
L
Linus Torvalds 已提交
45 46 47 48 49 50 51 52 53
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
54
#include <net/dst_metadata.h>
L
Linus Torvalds 已提交
55
#include <net/xfrm.h>
56
#include <net/netevent.h>
57
#include <net/netlink.h>
58
#include <net/rtnh.h>
59
#include <net/lwtunnel.h>
60
#include <net/ip_tunnels.h>
D
David Ahern 已提交
61
#include <net/l3mdev.h>
62
#include <net/ip.h>
63
#include <linux/uaccess.h>
L
Linus Torvalds 已提交
64 65 66 67 68

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

69 70 71 72 73 74 75
static int ip6_rt_type_to_error(u8 fib6_type);

#define CREATE_TRACE_POINTS
#include <trace/events/fib6.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
#undef CREATE_TRACE_POINTS

76
/* Outcome of a nexthop neighbour reachability check (rt6_check_neigh()).
 * Negative values are failures of increasing recoverability; positive
 * means the neighbour looks usable.
 */
enum rt6_nud_state {
	RT6_NUD_FAIL_HARD = -3,		/* do not use this nexthop */
	RT6_NUD_FAIL_PROBE = -2,	/* unusable, but worth probing */
	RT6_NUD_FAIL_DO_RR = -1,	/* unknown; fall back to round-robin */
	RT6_NUD_SUCCEED = 1		/* neighbour (probably) reachable */
};

L
Linus Torvalds 已提交
83
static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
84
static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
85
static unsigned int	 ip6_mtu(const struct dst_entry *dst);
L
Linus Torvalds 已提交
86 87 88 89
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void		ip6_dst_destroy(struct dst_entry *);
static void		ip6_dst_ifdown(struct dst_entry *,
				       struct net_device *dev, int how);
90
static int		 ip6_dst_gc(struct dst_ops *ops);
L
Linus Torvalds 已提交
91 92

static int		ip6_pkt_discard(struct sk_buff *skb);
E
Eric W. Biederman 已提交
93
static int		ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
94
static int		ip6_pkt_prohibit(struct sk_buff *skb);
E
Eric W. Biederman 已提交
95
static int		ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
L
Linus Torvalds 已提交
96
static void		ip6_link_failure(struct sk_buff *skb);
97 98 99 100
static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
101 102
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
			   int strict);
103
static size_t rt6_nlmsg_size(struct fib6_info *f6i);
104
static int rt6_fill_node(struct net *net, struct sk_buff *skb,
105
			 struct fib6_info *rt, struct dst_entry *dst,
106
			 struct in6_addr *dest, struct in6_addr *src,
107 108
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags);
109
static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
110 111
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr);
L
Linus Torvalds 已提交
112

113
#ifdef CONFIG_IPV6_ROUTE_INFO
114
static struct fib6_info *rt6_add_route_info(struct net *net,
115
					   const struct in6_addr *prefix, int prefixlen,
116 117
					   const struct in6_addr *gwaddr,
					   struct net_device *dev,
118
					   unsigned int pref);
119
static struct fib6_info *rt6_get_route_info(struct net *net,
120
					   const struct in6_addr *prefix, int prefixlen,
121 122
					   const struct in6_addr *gwaddr,
					   struct net_device *dev);
123 124
#endif

125 126 127 128 129 130 131
/* Per-CPU list of "uncached" rt6_info entries (dst copies not linked
 * into the FIB tree), so device-teardown can find and re-home them.
 */
struct uncached_list {
	spinlock_t		lock;	/* protects head */
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);

132
/* Link @rt onto this CPU's uncached list and remember the owning list
 * in the route so rt6_uncached_list_del() can find it later.
 */
void rt6_uncached_list_add(struct rt6_info *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

	rt->rt6i_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt6i_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

143
/* Unlink @rt from the uncached list it was added to (no-op if it was
 * never linked) and decrement the per-netns uncached-route counter.
 */
void rt6_uncached_list_del(struct rt6_info *rt)
{
	if (!list_empty(&rt->rt6i_uncached)) {
		struct uncached_list *ul = rt->rt6i_uncached_list;
		struct net *net = dev_net(rt->dst.dev);

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt6i_uncached);
		atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
		spin_unlock_bh(&ul->lock);
	}
}

/* Device @dev is going away: walk every CPU's uncached list and detach
 * routes still referencing it — the idev moves to loopback, the dst
 * device is swapped for the global blackhole device — so @dev's
 * refcount can drop to zero.
 */
static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
{
	struct net_device *loopback_dev = net->loopback_dev;
	int cpu;

	if (dev == loopback_dev)
		return;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
		struct rt6_info *rt;

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
			struct inet6_dev *rt_idev = rt->rt6i_idev;
			struct net_device *rt_dev = rt->dst.dev;

			if (rt_idev->dev == dev) {
				rt->rt6i_idev = in6_dev_get(loopback_dev);
				in6_dev_put(rt_idev);
			}

			if (rt_dev == dev) {
				rt->dst.dev = blackhole_netdev;
				/* hold the replacement before dropping @dev */
				dev_hold(rt->dst.dev);
				dev_put(rt_dev);
			}
		}
		spin_unlock_bh(&ul->lock);
	}
}

188
/* Pick the address to resolve a neighbour for: the explicit gateway @p
 * if set, else the destination address from @skb, else fall back to
 * the caller-supplied @daddr.
 */
static inline const void *choose_neigh_daddr(const struct in6_addr *p,
					     struct sk_buff *skb,
					     const void *daddr)
{
	if (!ipv6_addr_any(p))
		return (const void *) p;
	else if (skb)
		return &ipv6_hdr(skb)->daddr;
	return daddr;
}

199 200 201 202
/* Look up — or create — the neighbour entry used to reach the chosen
 * address (@gw if set, else @skb's destination, else @daddr) via @dev.
 * Returns NULL (not an ERR_PTR) if neigh_create() fails.
 */
struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
				   struct net_device *dev,
				   struct sk_buff *skb,
				   const void *daddr)
{
	struct neighbour *n;

	daddr = choose_neigh_daddr(gw, skb, daddr);
	n = __ipv6_neigh_lookup(dev, daddr);
	if (n)
		return n;

	n = neigh_create(&nd_tbl, daddr, dev);
	return IS_ERR(n) ? NULL : n;
}

/* dst_ops::neigh_lookup hook: resolve via the route's nexthop (gateway
 * when present, otherwise the destination itself).
 */
static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
					      struct sk_buff *skb,
					      const void *daddr)
{
	const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);

	return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
				dst->dev, skb, daddr);
}

225 226 227 228 229
/* dst_ops::confirm_neigh hook: mark the neighbour for the route's
 * gateway (or @daddr) as recently confirmed.  Skipped when there is no
 * address to confirm, on devices that do no neighbour discovery, and
 * for multicast destinations.
 */
static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	struct net_device *dev = dst->dev;
	struct rt6_info *rt = (struct rt6_info *)dst;

	daddr = choose_neigh_daddr(&rt->rt6i_gateway, NULL, daddr);
	if (!daddr)
		return;
	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
		return;
	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
		return;
	__ipv6_confirm_neigh(dev, daddr);
}

240
/* dst_ops template for regular IPv6 routes; copied into each netns's
 * net->ipv6.ip6_dst_ops.
 */
static struct dst_ops ip6_dst_ops_template = {
	.family			=	AF_INET6,
	.gc			=	ip6_dst_gc,
	.gc_thresh		=	1024,
	.check			=	ip6_dst_check,
	.default_advmss		=	ip6_default_advmss,
	.mtu			=	ip6_mtu,
	.cow_metrics		=	dst_cow_metrics_generic,
	.destroy		=	ip6_dst_destroy,
	.ifdown			=	ip6_dst_ifdown,
	.negative_advice	=	ip6_negative_advice,
	.link_failure		=	ip6_link_failure,
	.update_pmtu		=	ip6_rt_update_pmtu,
	.redirect		=	rt6_do_redirect,
	.local_out		=	__ip6_local_out,
	.neigh_lookup		=	ip6_dst_neigh_lookup,
	.confirm_neigh		=	ip6_confirm_neigh,
};

259
static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
260
{
261 262 263
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
264 265
}

266 267
/* Blackhole dsts ignore PMTU updates; intentionally a no-op. */
static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					 struct sk_buff *skb, u32 mtu)
{
}

271 272
/* Blackhole dsts ignore redirects; intentionally a no-op. */
static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				      struct sk_buff *skb)
{
}

276 277 278 279
/* dst_ops for blackhole dsts (e.g. from xfrm): PMTU/redirect events are
 * swallowed by the no-op hooks above.
 */
static struct dst_ops ip6_dst_blackhole_ops = {
	.family			=	AF_INET6,
	.destroy		=	ip6_dst_destroy,
	.check			=	ip6_dst_check,
	.mtu			=	ip6_blackhole_mtu,
	.default_advmss		=	ip6_default_advmss,
	.update_pmtu		=	ip6_rt_blackhole_update_pmtu,
	.redirect		=	ip6_rt_blackhole_redirect,
	.cow_metrics		=	dst_cow_metrics_generic,
	.neigh_lookup		=	ip6_dst_neigh_lookup,
};

288
/* Metrics used by the reject/blackhole route templates below;
 * HOPLIMIT 0 means "use the default".
 */
static const u32 ip6_template_metrics[RTAX_MAX] = {
	[RTAX_HOPLIMIT - 1] = 0,
};

292
/* Template for the per-netns fib6_null_entry: the catch-all
 * "unreachable" FIB entry returned when no route matches.
 */
static const struct fib6_info fib6_null_entry_template = {
	.fib6_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.fib6_protocol  = RTPROT_KERNEL,
	.fib6_metric	= ~(u32)0,	/* worst possible metric */
	.fib6_ref	= REFCOUNT_INIT(1),
	.fib6_type	= RTN_UNREACHABLE,
	.fib6_metrics	= (struct dst_metrics *)&dst_default_metrics,
};

301
/* Template for the per-netns ip6_null_entry dst: drops packets with
 * ENETUNREACH (no route to host).
 */
static const struct rt6_info ip6_null_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -ENETUNREACH,
		.input		= ip6_pkt_discard,
		.output		= ip6_pkt_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

T
Thomas Graf 已提交
313 314
#ifdef CONFIG_IPV6_MULTIPLE_TABLES

315
/* Template for the policy-routing "prohibit" dst: drops packets with
 * EACCES (administratively prohibited).
 */
static const struct rt6_info ip6_prohibit_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EACCES,
		.input		= ip6_pkt_prohibit,
		.output		= ip6_pkt_prohibit_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

327
/* Template for the policy-routing "blackhole" dst: silently discards
 * packets (error -EINVAL, no ICMP generated).
 */
static const struct rt6_info ip6_blk_hole_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EINVAL,
		.input		= dst_discard,
		.output		= dst_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

#endif

341 342 343 344 345 346 347 348
/* Zero everything in @rt that follows the embedded dst_entry (which
 * dst_alloc() already initialised) and set up the uncached-list link.
 */
static void rt6_info_init(struct rt6_info *rt)
{
	struct dst_entry *dst = &rt->dst;

	/* dst + 1 == first byte of rt6_info past the dst member */
	memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
	INIT_LIST_HEAD(&rt->rt6i_uncached);
}

L
Linus Torvalds 已提交
349
/* allocate dst with ip6_dst_ops
 *
 * Allocate and initialise a rt6_info for @net bound to @dev; @flags is
 * passed through to dst_alloc() (DST_* allocation flags).  Bumps the
 * per-netns allocation counter.  Returns NULL on allocation failure.
 */
struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
			       int flags)
{
	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
					1, DST_OBSOLETE_FORCE_CHK, flags);

	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
	}

	return rt;
}
EXPORT_SYMBOL(ip6_dst_alloc);
M
Martin KaFai Lau 已提交
364

L
Linus Torvalds 已提交
365 366 367
/* dst_ops::destroy hook: release everything a rt6_info holds — its
 * metrics, uncached-list membership, idev reference, and the fib6_info
 * it was created from.
 */
static void ip6_dst_destroy(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct fib6_info *from;
	struct inet6_dev *idev;

	ip_dst_metrics_put(dst);
	rt6_uncached_list_del(rt);

	idev = rt->rt6i_idev;
	if (idev) {
		rt->rt6i_idev = NULL;
		in6_dev_put(idev);
	}

	/* atomically take ownership of ->from; racing readers see NULL */
	from = xchg((__force struct fib6_info **)&rt->from, NULL);
	fib6_info_release(from);
}

L
Linus Torvalds 已提交
384 385 386 387 388
/* dst_ops::ifdown hook: @dev is going down, so re-home the route's
 * inet6_dev reference onto the loopback device.
 */
static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			   int how)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct net_device *loopback_dev =
		dev_net(dev)->loopback_dev;

	if (idev && idev->dev != loopback_dev) {
		struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
		if (loopback_idev) {
			rt->rt6i_idev = loopback_idev;
			in6_dev_put(idev);
		}
	}
}

401 402 403 404 405 406 407 408
/* True when @rt carries an expiry time (RTF_EXPIRES) that has already
 * passed.
 */
static bool __rt6_check_expired(const struct rt6_info *rt)
{
	return (rt->rt6i_flags & RTF_EXPIRES) &&
	       time_after(jiffies, rt->dst.expires);
}

409
/* True when @rt is no longer usable: either its own expiry time has
 * passed or, when it is a cached copy, the fib6_info it derives from
 * is stale/expired.  Caller holds rcu_read_lock (rcu_dereference).
 */
static bool rt6_check_expired(const struct rt6_info *rt)
{
	struct fib6_info *from;

	from = rcu_dereference(rt->from);

	if (rt->rt6i_flags & RTF_EXPIRES) {
		if (time_after(jiffies, rt->dst.expires))
			return true;
	} else if (from) {
		/* a cached route with the wrong obsolete value is stale */
		return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
			fib6_check_expired(from);
	}
	return false;
}

425 426 427
/* Multipath selection: given the matched route in @res->f6i, pick the
 * sibling/nexthop for this flow based on the flow hash (fl6->mp_hash)
 * and each nexthop's hash upper bound.  Updates @res in place.
 */
void fib6_select_path(const struct net *net, struct fib6_result *res,
		      struct flowi6 *fl6, int oif, bool have_oif_match,
		      const struct sk_buff *skb, int strict)
{
	struct fib6_info *sibling, *next_sibling;
	struct fib6_info *match = res->f6i;

	/* single-path route, or oif already pinned the choice */
	if ((!match->fib6_nsiblings && !match->nh) || have_oif_match)
		goto out;

	/* We might have already computed the hash for ICMPv6 errors. In such
	 * case it will always be non-zero. Otherwise now is the time to do it.
	 */
	if (!fl6->mp_hash &&
	    (!match->nh || nexthop_is_multipath(match->nh)))
		fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);

	/* nexthop objects do their own path selection */
	if (unlikely(match->nh)) {
		nexthop_path_fib6_result(res, fl6->mp_hash);
		return;
	}

	/* hash falls within the first sibling's share */
	if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound))
		goto out;

	list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
				 fib6_siblings) {
		const struct fib6_nh *nh = sibling->fib6_nh;
		int nh_upper_bound;

		nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
		if (fl6->mp_hash > nh_upper_bound)
			continue;
		if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
			break;
		match = sibling;
		break;
	}

out:
	res->f6i = match;
	res->nh = match->fib6_nh;
}

L
Linus Torvalds 已提交
469
/*
470
 *	Route lookup. rcu_read_lock() should be held.
L
Linus Torvalds 已提交
471 472
 */

D
David Ahern 已提交
473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493
/* Does nexthop @nh satisfy the device constraint?  Matches when the
 * nexthop device has ifindex @oif, or — with no oif — when @saddr is
 * a local address on that device.  Dead nexthops never match.
 */
static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
			       const struct in6_addr *saddr, int oif, int flags)
{
	const struct net_device *dev;

	if (nh->fib_nh_flags & RTNH_F_DEAD)
		return false;

	dev = nh->fib_nh_dev;
	if (oif) {
		if (dev->ifindex == oif)
			return true;
	} else {
		if (ipv6_chk_addr(net, saddr, dev,
				  flags & RT6_LOOKUP_F_IFACE))
			return true;
	}

	return false;
}

494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532
/* Argument bundle for walking a nexthop object's fib6_nh entries with
 * __rt6_device_match(); ->nh records the matching entry.
 */
struct fib6_nh_dm_arg {
	struct net		*net;
	const struct in6_addr	*saddr;
	int			oif;
	int			flags;
	struct fib6_nh		*nh;	/* out: matching nexthop */
};

static int __rt6_nh_dev_match(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_dm_arg *arg = _arg;

	arg->nh = nh;
	return __rt6_device_match(arg->net, nh, arg->saddr, arg->oif,
				  arg->flags);
}

/* returns fib6_nh from nexthop or NULL */
static struct fib6_nh *rt6_nh_dev_match(struct net *net, struct nexthop *nh,
					struct fib6_result *res,
					const struct in6_addr *saddr,
					int oif, int flags)
{
	struct fib6_nh_dm_arg arg = {
		.net   = net,
		.saddr = saddr,
		.oif   = oif,
		.flags = flags,
	};

	if (nexthop_is_blackhole(nh))
		return NULL;

	if (nexthop_for_each_fib6_nh(nh, __rt6_nh_dev_match, &arg))
		return arg.nh;

	return NULL;
}

533 534
/* Walk the routes starting at res->f6i and pick the first whose nexthop
 * matches the (oif, saddr) constraints, filling in @res.  Falls back to
 * the original route (or fib6_null_entry when the device match is
 * mandatory or the nexthop is dead).  Caller holds rcu_read_lock.
 */
static void rt6_device_match(struct net *net, struct fib6_result *res,
			     const struct in6_addr *saddr, int oif, int flags)
{
	struct fib6_info *f6i = res->f6i;
	struct fib6_info *spf6i;
	struct fib6_nh *nh;

	/* no constraints at all: accept f6i unless its nexthop is dead */
	if (!oif && ipv6_addr_any(saddr)) {
		if (unlikely(f6i->nh)) {
			nh = nexthop_fib6_nh(f6i->nh);
			if (nexthop_is_blackhole(f6i->nh))
				goto out_blackhole;
		} else {
			nh = f6i->fib6_nh;
		}
		if (!(nh->fib_nh_flags & RTNH_F_DEAD))
			goto out;
	}

	for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
		bool matched = false;

		if (unlikely(spf6i->nh)) {
			nh = rt6_nh_dev_match(net, spf6i->nh, res, saddr,
					      oif, flags);
			if (nh)
				matched = true;
		} else {
			nh = spf6i->fib6_nh;
			if (__rt6_device_match(net, nh, saddr, oif, flags))
				matched = true;
		}
		if (matched) {
			res->f6i = spf6i;
			goto out;
		}
	}

	/* strict interface match required but nothing matched */
	if (oif && flags & RT6_LOOKUP_F_IFACE) {
		res->f6i = net->ipv6.fib6_null_entry;
		nh = res->f6i->fib6_nh;
		goto out;
	}

	if (unlikely(f6i->nh)) {
		nh = nexthop_fib6_nh(f6i->nh);
		if (nexthop_is_blackhole(f6i->nh))
			goto out_blackhole;
	} else {
		nh = f6i->fib6_nh;
	}

	if (nh->fib_nh_flags & RTNH_F_DEAD) {
		res->f6i = net->ipv6.fib6_null_entry;
		nh = res->f6i->fib6_nh;
	}
out:
	res->nh = nh;
	res->fib6_type = res->f6i->fib6_type;
	res->fib6_flags = res->f6i->fib6_flags;
	return;

out_blackhole:
	res->fib6_flags |= RTF_REJECT;
	res->fib6_type = RTN_BLACKHOLE;
	res->nh = nh;
}

601
#ifdef CONFIG_IPV6_ROUTER_PREF
/* Deferred-work context for sending one neighbour solicitation to a
 * route's gateway (Router Reachability Probing).
 */
struct __rt6_probe_work {
	struct work_struct work;
	struct in6_addr target;		/* gateway address to solicit */
	struct net_device *dev;		/* held until the probe is sent */
};

static void rt6_probe_deferred(struct work_struct *w)
{
	struct in6_addr mcaddr;
	struct __rt6_probe_work *work =
		container_of(w, struct __rt6_probe_work, work);

	addrconf_addr_solict_mult(&work->target, &mcaddr);
	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
	dev_put(work->dev);	/* drop the ref taken in rt6_probe() */
	kfree(work);
}

/* Schedule a reachability probe for @fib6_nh's gateway if its neighbour
 * state is unknown or stale.  Only meaningful for gatewayed nexthops.
 */
static void rt6_probe(struct fib6_nh *fib6_nh)
{
	struct __rt6_probe_work *work = NULL;
	const struct in6_addr *nh_gw;
	struct neighbour *neigh;
	struct net_device *dev;
	struct inet6_dev *idev;

	/*
	 * Okay, this does not seem to be appropriate
	 * for now, however, we need to check if it
	 * is really so; aka Router Reachability Probing.
	 *
	 * Router Reachability Probe MUST be rate-limited
	 * to no more than one per minute.
	 */
	/* FIX: the check was inverted ("if (fib6_nh->fib_nh_gw_family)"),
	 * which skipped exactly the gatewayed nexthops this function
	 * exists to probe and probed a zero gateway address otherwise.
	 * Bail out when there is NO gateway.
	 */
	if (!fib6_nh->fib_nh_gw_family)
		return;

	nh_gw = &fib6_nh->fib_nh_gw6;
	dev = fib6_nh->fib_nh_dev;
	rcu_read_lock_bh();
	idev = __in6_dev_get(dev);
	neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
	if (neigh) {
		if (neigh->nud_state & NUD_VALID)
			goto out;

		write_lock(&neigh->lock);
		/* re-check under the lock; rate-limit via neigh->updated */
		if (!(neigh->nud_state & NUD_VALID) &&
		    time_after(jiffies,
			       neigh->updated + idev->cnf.rtr_probe_interval)) {
			work = kmalloc(sizeof(*work), GFP_ATOMIC);
			if (work)
				__neigh_set_probe_once(neigh);
		}
		write_unlock(&neigh->lock);
	} else if (time_after(jiffies, fib6_nh->last_probe +
				       idev->cnf.rtr_probe_interval)) {
		/* no neighbour entry yet; rate-limit via last_probe */
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
	}

	if (work) {
		fib6_nh->last_probe = jiffies;
		INIT_WORK(&work->work, rt6_probe_deferred);
		work->target = *nh_gw;
		dev_hold(dev);	/* released in rt6_probe_deferred() */
		work->dev = dev;
		schedule_work(&work->work);
	}

out:
	rcu_read_unlock_bh();
}
#else
static inline void rt6_probe(struct fib6_nh *fib6_nh)
{
}
#endif

L
Linus Torvalds 已提交
680
/*
681
 * Default Router Selection (RFC 2461 6.3.6)
L
Linus Torvalds 已提交
682
 */
683
/*
 * Default Router Selection (RFC 2461 6.3.6): classify the reachability
 * of @fib6_nh's gateway neighbour.  See enum rt6_nud_state for the
 * meaning of the return values.
 */
static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
{
	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
	struct neighbour *neigh;

	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
					  &fib6_nh->fib_nh_gw6);
	if (neigh) {
		read_lock(&neigh->lock);
		if (neigh->nud_state & NUD_VALID)
			ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
		else if (!(neigh->nud_state & NUD_FAILED))
			ret = RT6_NUD_SUCCEED;
		else
			ret = RT6_NUD_FAIL_PROBE;
#endif
		read_unlock(&neigh->lock);
	} else {
		/* no neighbour entry: usable if we can probe, else
		 * fall back to round-robin among routers
		 */
		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
	}
	rcu_read_unlock_bh();

	return ret;
}

711 712
/* Score nexthop @nh for router selection: +2 for an interface match,
 * plus the decoded router-preference bits.  Returns a negative
 * rt6_nud_state value when the nexthop must not (or should not) be
 * used.
 */
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
			   int strict)
{
	int m = 0;

	if (!oif || nh->fib_nh_dev->ifindex == oif)
		m = 2;

	if (!m && (strict & RT6_LOOKUP_F_IFACE))
		return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
#endif
	/* reachability only matters for gatewayed, non-NONEXTHOP routes */
	if ((strict & RT6_LOOKUP_F_REACHABLE) &&
	    !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) {
		int n = rt6_check_neigh(nh);
		if (n < 0)
			return n;
	}
	return m;
}

D
David Ahern 已提交
733 734
/* Consider nexthop @nh as a round-robin candidate: score it and, when
 * the score beats *mpri, record it as the current best (returns true).
 * *do_rr is set when the best candidate's neighbour state asked for
 * round-robin.  Also triggers a reachability probe in REACHABLE mode.
 */
static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
		       int oif, int strict, int *mpri, bool *do_rr)
{
	bool match_do_rr = false;
	bool rc = false;
	int m;

	if (nh->fib_nh_flags & RTNH_F_DEAD)
		goto out;

	if (ip6_ignore_linkdown(nh->fib_nh_dev) &&
	    nh->fib_nh_flags & RTNH_F_LINKDOWN &&
	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
		goto out;

	m = rt6_score_route(nh, fib6_flags, oif, strict);
	if (m == RT6_NUD_FAIL_DO_RR) {
		match_do_rr = true;
		m = 0; /* lowest valid score */
	} else if (m == RT6_NUD_FAIL_HARD) {
		goto out;
	}

	if (strict & RT6_LOOKUP_F_REACHABLE)
		rt6_probe(nh);

	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
	if (m > *mpri) {
		*do_rr = match_do_rr;
		*mpri = m;
		rc = true;
	}
out:
	return rc;
}

769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786
/* Argument bundle for running find_match() over each fib6_nh of a
 * nexthop object; ->nh records the last nexthop visited.
 */
struct fib6_nh_frl_arg {
	u32		flags;
	int		oif;
	int		strict;
	int		*mpri;
	bool		*do_rr;
	struct fib6_nh	*nh;	/* out: candidate nexthop */
};

static int rt6_nh_find_match(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_frl_arg *arg = _arg;

	arg->nh = nh;
	return find_match(nh, arg->flags, arg->oif, arg->strict,
			  arg->mpri, arg->do_rr);
}

787
/* Scan the route list from @f6i_start (stopping at @nomatch) for the
 * best round-robin candidate at @metric, updating @res via find_match()
 * scoring.  Routes at a different metric stop the scan and are reported
 * through *cont so the caller can resume there.  Caller holds
 * rcu_read_lock.
 */
static void __find_rr_leaf(struct fib6_info *f6i_start,
			   struct fib6_info *nomatch, u32 metric,
			   struct fib6_result *res, struct fib6_info **cont,
			   int oif, int strict, bool *do_rr, int *mpri)
{
	struct fib6_info *f6i;

	for (f6i = f6i_start;
	     f6i && f6i != nomatch;
	     f6i = rcu_dereference(f6i->fib6_next)) {
		bool matched = false;
		struct fib6_nh *nh;

		/* metric boundary: remember where to continue and stop */
		if (cont && f6i->fib6_metric != metric) {
			*cont = f6i;
			return;
		}

		if (fib6_check_expired(f6i))
			continue;

		if (unlikely(f6i->nh)) {
			struct fib6_nh_frl_arg arg = {
				.flags  = f6i->fib6_flags,
				.oif    = oif,
				.strict = strict,
				.mpri   = mpri,
				.do_rr  = do_rr
			};

			if (nexthop_is_blackhole(f6i->nh)) {
				res->fib6_flags = RTF_REJECT;
				res->fib6_type = RTN_BLACKHOLE;
				res->f6i = f6i;
				res->nh = nexthop_fib6_nh(f6i->nh);
				return;
			}
			if (nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_find_match,
						     &arg)) {
				matched = true;
				nh = arg.nh;
			}
		} else {
			nh = f6i->fib6_nh;
			if (find_match(nh, f6i->fib6_flags, oif, strict,
				       mpri, do_rr))
				matched = true;
		}
		if (matched) {
			res->f6i = f6i;
			res->nh = nh;
			res->fib6_flags = f6i->fib6_flags;
			res->fib6_type = f6i->fib6_type;
		}
	}
}
843

844 845 846
/* Round-robin leaf selection: scan from the current round-robin head to
 * the end of its metric group, then wrap from the leaf back to the
 * head; if nothing matched at that metric, continue at the next metric
 * group recorded in @cont.
 */
static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
			 struct fib6_info *rr_head, int oif, int strict,
			 bool *do_rr, struct fib6_result *res)
{
	u32 metric = rr_head->fib6_metric;
	struct fib6_info *cont = NULL;
	int mpri = -1;

	__find_rr_leaf(rr_head, NULL, metric, res, &cont,
		       oif, strict, do_rr, &mpri);

	__find_rr_leaf(leaf, rr_head, metric, res, &cont,
		       oif, strict, do_rr, &mpri);

	if (res->f6i || !cont)
		return;

	__find_rr_leaf(cont, NULL, metric, res, NULL,
		       oif, strict, do_rr, &mpri);
}
L
Linus Torvalds 已提交
864

865 866
/* Select a route from fib6_node @fn into @res, applying round-robin
 * default-router rotation when the neighbour checks request it.
 * Guarantees res->f6i is set (fib6_null_entry when nothing matched).
 * Caller holds rcu_read_lock.
 */
static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
		       struct fib6_result *res, int strict)
{
	struct fib6_info *leaf = rcu_dereference(fn->leaf);
	struct fib6_info *rt0;
	bool do_rr = false;
	int key_plen;

	/* make sure this function or its helpers sets f6i */
	res->f6i = NULL;

	if (!leaf || leaf == net->ipv6.fib6_null_entry)
		goto out;

	rt0 = rcu_dereference(fn->rr_ptr);
	if (!rt0)
		rt0 = leaf;

	/* Double check to make sure fn is not an intermediate node
	 * and fn->leaf does not points to its child's leaf
	 * (This might happen if all routes under fn are deleted from
	 * the tree and fib6_repair_tree() is called on the node.)
	 */
	key_plen = rt0->fib6_dst.plen;
#ifdef CONFIG_IPV6_SUBTREES
	if (rt0->fib6_src.plen)
		key_plen = rt0->fib6_src.plen;
#endif
	if (fn->fn_bit != key_plen)
		goto out;

	find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
	if (do_rr) {
		struct fib6_info *next = rcu_dereference(rt0->fib6_next);

		/* no entries matched; do round-robin */
		if (!next || next->fib6_metric != rt0->fib6_metric)
			next = leaf;

		if (next != rt0) {
			spin_lock_bh(&leaf->fib6_table->tb6_lock);
			/* make sure next is not being deleted from the tree */
			if (next->fib6_node)
				rcu_assign_pointer(fn->rr_ptr, next);
			spin_unlock_bh(&leaf->fib6_table->tb6_lock);
		}
	}

out:
	if (!res->f6i) {
		res->f6i = net->ipv6.fib6_null_entry;
		res->nh = res->f6i->fib6_nh;
		res->fib6_flags = res->f6i->fib6_flags;
		res->fib6_type = res->f6i->fib6_type;
	}
}

922
static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
923
{
924 925
	return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||
	       res->nh->fib_nh_gw_family;
926 927
}

928 929
#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
930
		  const struct in6_addr *gwaddr)
931
{
932
	struct net *net = dev_net(dev);
933 934 935
	struct route_info *rinfo = (struct route_info *) opt;
	struct in6_addr prefix_buf, *prefix;
	unsigned int pref;
936
	unsigned long lifetime;
937
	struct fib6_info *rt;
938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959

	if (len < sizeof(struct route_info)) {
		return -EINVAL;
	}

	/* Sanity check for prefix_len and length */
	if (rinfo->length > 3) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 128) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 64) {
		if (rinfo->length < 2) {
			return -EINVAL;
		}
	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1) {
			return -EINVAL;
		}
	}

	pref = rinfo->route_pref;
	if (pref == ICMPV6_ROUTER_PREF_INVALID)
960
		return -EINVAL;
961

962
	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
963 964 965 966 967 968 969 970 971 972 973

	if (rinfo->length == 3)
		prefix = (struct in6_addr *)rinfo->prefix;
	else {
		/* this function is safe */
		ipv6_addr_prefix(&prefix_buf,
				 (struct in6_addr *)rinfo->prefix,
				 rinfo->prefix_len);
		prefix = &prefix_buf;
	}

974
	if (rinfo->prefix_len == 0)
975
		rt = rt6_get_dflt_router(net, gwaddr, dev);
976 977
	else
		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
978
					gwaddr, dev);
979 980

	if (rt && !lifetime) {
981
		ip6_del_rt(net, rt);
982 983 984 985
		rt = NULL;
	}

	if (!rt && lifetime)
986 987
		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
					dev, pref);
988
	else if (rt)
989 990
		rt->fib6_flags = RTF_ROUTEINFO |
				 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
991 992

	if (rt) {
993
		if (!addrconf_finite_timeout(lifetime))
994
			fib6_clean_expires(rt);
995
		else
996
			fib6_set_expires(rt, jiffies + HZ * lifetime);
997

998
		fib6_info_release(rt);
999 1000 1001 1002 1003
	}
	return 0;
}
#endif

1004 1005 1006 1007 1008
/*
 *	Misc support functions
 */

/* called with rcu_lock held */
1009
/* called with rcu_lock held */
/* Return the device a dst copy of this result should use; local and
 * anycast routes are steered to the l3mdev master or loopback.
 */
static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
{
	struct net_device *dev = res->nh->fib_nh_dev;

	if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
		/* for copies of local routes, dst->dev needs to be the
		 * device if it is a master device, the master device if
		 * device is enslaved, and the loopback as the default
		 */
		if (netif_is_l3_slave(dev) &&
		    !rt6_need_strict(&res->f6i->fib6_dst.addr))
			dev = l3mdev_master_dev_rcu(dev);
		else if (!netif_is_l3_master(dev))
			dev = dev_net(dev)->loopback_dev;
		/* last case is netif_is_l3_master(dev) is true in which
		 * case we want dev returned to be dev
		 */
	}

	return dev;
}

1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050
/* Map fib6 route types to the dst->error value reported to senders;
 * 0 means the type delivers/forwards normally.
 */
static const int fib6_prop[RTN_MAX + 1] = {
	[RTN_UNSPEC]	= 0,
	[RTN_UNICAST]	= 0,
	[RTN_LOCAL]	= 0,
	[RTN_BROADCAST]	= 0,
	[RTN_ANYCAST]	= 0,
	[RTN_MULTICAST]	= 0,
	[RTN_BLACKHOLE]	= -EINVAL,
	[RTN_UNREACHABLE] = -EHOSTUNREACH,
	[RTN_PROHIBIT]	= -EACCES,
	[RTN_THROW]	= -EAGAIN,
	[RTN_NAT]	= -EINVAL,
	[RTN_XRESOLVE]	= -EINVAL,
};

/* Translate a fib6_type (RTN_*) to its errno. */
static int ip6_rt_type_to_error(u8 fib6_type)
{
	return fib6_prop[fib6_type];
}

1051
/* Translate a fib6_info's dst_* bits into DST_* allocation flags for
 * dst_alloc().
 */
static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
{
	unsigned short flags = 0;

	if (rt->dst_nocount)
		flags |= DST_NOCOUNT;
	if (rt->dst_nopolicy)
		flags |= DST_NOPOLICY;
	if (rt->dst_host)
		flags |= DST_HOST;

	return flags;
}

1065
/* Set up the dst error code and input/output handlers for a reject-type
 * route (blackhole, prohibit, throw, unreachable, ...).
 */
static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
{
	rt->dst.error = ip6_rt_type_to_error(fib6_type);

	switch (fib6_type) {
	case RTN_BLACKHOLE:
		/* silently drop in both directions */
		rt->dst.output = dst_discard_out;
		rt->dst.input = dst_discard;
		break;
	case RTN_PROHIBIT:
		/* drop and signal administrative prohibition */
		rt->dst.output = ip6_pkt_prohibit_out;
		rt->dst.input = ip6_pkt_prohibit;
		break;
	case RTN_THROW:
	case RTN_UNREACHABLE:
	default:
		/* drop and signal unreachability */
		rt->dst.output = ip6_pkt_discard_out;
		rt->dst.input = ip6_pkt_discard;
		break;
	}
}

1087
static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
1088
{
1089
	struct fib6_info *f6i = res->f6i;
1090

1091 1092
	if (res->fib6_flags & RTF_REJECT) {
		ip6_rt_init_dst_reject(rt, res->fib6_type);
1093 1094 1095 1096 1097 1098
		return;
	}

	rt->dst.error = 0;
	rt->dst.output = ip6_output;

1099
	if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
1100
		rt->dst.input = ip6_input;
1101
	} else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
1102 1103 1104 1105 1106
		rt->dst.input = ip6_mc_input;
	} else {
		rt->dst.input = ip6_forward;
	}

1107 1108
	if (res->nh->fib_nh_lws) {
		rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
1109 1110 1111 1112 1113 1114
		lwtunnel_set_redirect(&rt->dst);
	}

	rt->dst.lastuse = jiffies;
}

1115
/* Caller must already hold reference to @from */
1116
static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
1117 1118
{
	rt->rt6i_flags &= ~RTF_EXPIRES;
1119
	rcu_assign_pointer(rt->from, from);
1120
	ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
1121 1122
}

1123 1124
/* Caller must already hold reference to f6i in result */
static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
1125
{
1126 1127 1128
	const struct fib6_nh *nh = res->nh;
	const struct net_device *dev = nh->fib_nh_dev;
	struct fib6_info *f6i = res->f6i;
D
David Ahern 已提交
1129

1130
	ip6_rt_init_dst(rt, res);
1131

1132
	rt->rt6i_dst = f6i->fib6_dst;
D
David Ahern 已提交
1133
	rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
1134
	rt->rt6i_flags = res->fib6_flags;
1135 1136
	if (nh->fib_nh_gw_family) {
		rt->rt6i_gateway = nh->fib_nh_gw6;
1137 1138
		rt->rt6i_flags |= RTF_GATEWAY;
	}
1139
	rt6_set_from(rt, f6i);
1140
#ifdef CONFIG_IPV6_SUBTREES
1141
	rt->rt6i_src = f6i->fib6_src;
1142 1143 1144
#endif
}

M
Martin KaFai Lau 已提交
1145 1146 1147
static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
					struct in6_addr *saddr)
{
1148
	struct fib6_node *pn, *sn;
M
Martin KaFai Lau 已提交
1149 1150 1151
	while (1) {
		if (fn->fn_flags & RTN_TL_ROOT)
			return NULL;
1152 1153 1154
		pn = rcu_dereference(fn->parent);
		sn = FIB6_SUBTREE(pn);
		if (sn && sn != fn)
1155
			fn = fib6_node_lookup(sn, NULL, saddr);
M
Martin KaFai Lau 已提交
1156 1157 1158 1159 1160 1161
		else
			fn = pn;
		if (fn->fn_flags & RTN_RTINFO)
			return fn;
	}
}
T
Thomas Graf 已提交
1162

1163
static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
1164 1165 1166 1167 1168
{
	struct rt6_info *rt = *prt;

	if (dst_hold_safe(&rt->dst))
		return true;
1169
	if (net) {
1170 1171 1172 1173 1174 1175 1176 1177 1178
		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
	} else {
		rt = NULL;
	}
	*prt = rt;
	return false;
}

1179
/* called with rcu_lock held */
1180
static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
1181
{
1182 1183 1184
	struct net_device *dev = res->nh->fib_nh_dev;
	struct fib6_info *f6i = res->f6i;
	unsigned short flags;
1185 1186
	struct rt6_info *nrt;

1187
	if (!fib6_info_hold_safe(f6i))
1188
		goto fallback;
1189

1190
	flags = fib6_info_dst_flags(f6i);
1191
	nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
1192
	if (!nrt) {
1193
		fib6_info_release(f6i);
1194 1195
		goto fallback;
	}
1196

1197
	ip6_rt_copy_init(nrt, res);
1198 1199 1200 1201 1202
	return nrt;

fallback:
	nrt = dev_net(dev)->ipv6.ip6_null_entry;
	dst_hold(&nrt->dst);
1203 1204 1205
	return nrt;
}

1206 1207
static struct rt6_info *ip6_pol_route_lookup(struct net *net,
					     struct fib6_table *table,
D
David Ahern 已提交
1208 1209 1210
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
L
Linus Torvalds 已提交
1211
{
1212
	struct fib6_result res = {};
L
Linus Torvalds 已提交
1213
	struct fib6_node *fn;
1214
	struct rt6_info *rt;
L
Linus Torvalds 已提交
1215

1216 1217 1218
	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		flags &= ~RT6_LOOKUP_F_IFACE;

1219
	rcu_read_lock();
1220
	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
T
Thomas Graf 已提交
1221
restart:
1222 1223 1224
	res.f6i = rcu_dereference(fn->leaf);
	if (!res.f6i)
		res.f6i = net->ipv6.fib6_null_entry;
1225
	else
1226 1227
		rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif,
				 flags);
1228

1229
	if (res.f6i == net->ipv6.fib6_null_entry) {
M
Martin KaFai Lau 已提交
1230 1231 1232
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
1233

1234 1235 1236
		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
		goto out;
1237 1238
	} else if (res.fib6_flags & RTF_REJECT) {
		goto do_create;
1239
	}
1240

1241 1242 1243
	fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
			 fl6->flowi6_oif != 0, skb, flags);

1244
	/* Search through exception table */
1245
	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
1246
	if (rt) {
1247
		if (ip6_hold_safe(net, &rt))
1248
			dst_use_noref(&rt->dst, jiffies);
1249
	} else {
1250
do_create:
1251
		rt = ip6_create_rt_rcu(&res);
1252
	}
D
David Ahern 已提交
1253

1254
out:
1255
	trace_fib6_table_lookup(net, &res, table, fl6);
1256

1257
	rcu_read_unlock();
D
David Ahern 已提交
1258

T
Thomas Graf 已提交
1259 1260 1261
	return rt;
}

1262
struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
D
David Ahern 已提交
1263
				   const struct sk_buff *skb, int flags)
F
Florian Westphal 已提交
1264
{
D
David Ahern 已提交
1265
	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
F
Florian Westphal 已提交
1266 1267 1268
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);

1269
struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
D
David Ahern 已提交
1270 1271
			    const struct in6_addr *saddr, int oif,
			    const struct sk_buff *skb, int strict)
T
Thomas Graf 已提交
1272
{
1273 1274 1275
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.daddr = *daddr,
T
Thomas Graf 已提交
1276 1277
	};
	struct dst_entry *dst;
1278
	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
T
Thomas Graf 已提交
1279

1280
	if (saddr) {
1281
		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
1282 1283 1284
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	}

D
David Ahern 已提交
1285
	dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
T
Thomas Graf 已提交
1286 1287 1288 1289 1290
	if (dst->error == 0)
		return (struct rt6_info *) dst;

	dst_release(dst);

L
Linus Torvalds 已提交
1291 1292
	return NULL;
}
1293 1294
EXPORT_SYMBOL(rt6_lookup);

T
Thomas Graf 已提交
1295
/* ip6_ins_rt is called with FREE table->tb6_lock.
1296 1297 1298
 * It takes new route entry, the addition fails by any reason the
 * route is released.
 * Caller must hold dst before calling it.
L
Linus Torvalds 已提交
1299 1300
 */

1301
static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
1302
			struct netlink_ext_ack *extack)
L
Linus Torvalds 已提交
1303 1304
{
	int err;
T
Thomas Graf 已提交
1305
	struct fib6_table *table;
L
Linus Torvalds 已提交
1306

1307
	table = rt->fib6_table;
1308
	spin_lock_bh(&table->tb6_lock);
1309
	err = fib6_add(&table->tb6_root, rt, info, extack);
1310
	spin_unlock_bh(&table->tb6_lock);
L
Linus Torvalds 已提交
1311 1312 1313 1314

	return err;
}

1315
int ip6_ins_rt(struct net *net, struct fib6_info *rt)
1316
{
1317
	struct nl_info info = {	.nl_net = net, };
1318

1319
	return __ip6_ins_rt(rt, &info, NULL);
1320 1321
}

1322
static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
1323 1324
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
L
Linus Torvalds 已提交
1325
{
1326
	struct fib6_info *f6i = res->f6i;
1327
	struct net_device *dev;
L
Linus Torvalds 已提交
1328 1329 1330 1331 1332 1333
	struct rt6_info *rt;

	/*
	 *	Clone the route.
	 */

1334
	if (!fib6_info_hold_safe(f6i))
1335 1336
		return NULL;

1337
	dev = ip6_rt_get_dev_rcu(res);
1338
	rt = ip6_dst_alloc(dev_net(dev), dev, 0);
1339
	if (!rt) {
1340
		fib6_info_release(f6i);
M
Martin KaFai Lau 已提交
1341
		return NULL;
1342
	}
M
Martin KaFai Lau 已提交
1343

1344
	ip6_rt_copy_init(rt, res);
M
Martin KaFai Lau 已提交
1345 1346 1347 1348
	rt->rt6i_flags |= RTF_CACHE;
	rt->dst.flags |= DST_HOST;
	rt->rt6i_dst.addr = *daddr;
	rt->rt6i_dst.plen = 128;
L
Linus Torvalds 已提交
1349

1350 1351 1352
	if (!rt6_is_gw_or_nonexthop(res)) {
		if (f6i->fib6_dst.plen != 128 &&
		    ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
M
Martin KaFai Lau 已提交
1353
			rt->rt6i_flags |= RTF_ANYCAST;
L
Linus Torvalds 已提交
1354
#ifdef CONFIG_IPV6_SUBTREES
M
Martin KaFai Lau 已提交
1355 1356 1357
		if (rt->rt6i_src.plen && saddr) {
			rt->rt6i_src.addr = *saddr;
			rt->rt6i_src.plen = 128;
1358
		}
M
Martin KaFai Lau 已提交
1359
#endif
1360
	}
L
Linus Torvalds 已提交
1361

1362 1363
	return rt;
}
L
Linus Torvalds 已提交
1364

1365
static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
M
Martin KaFai Lau 已提交
1366
{
1367 1368
	struct fib6_info *f6i = res->f6i;
	unsigned short flags = fib6_info_dst_flags(f6i);
1369
	struct net_device *dev;
M
Martin KaFai Lau 已提交
1370 1371
	struct rt6_info *pcpu_rt;

1372
	if (!fib6_info_hold_safe(f6i))
1373 1374
		return NULL;

1375
	rcu_read_lock();
1376
	dev = ip6_rt_get_dev_rcu(res);
1377
	pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
1378
	rcu_read_unlock();
1379
	if (!pcpu_rt) {
1380
		fib6_info_release(f6i);
M
Martin KaFai Lau 已提交
1381
		return NULL;
1382
	}
1383
	ip6_rt_copy_init(pcpu_rt, res);
M
Martin KaFai Lau 已提交
1384 1385 1386 1387
	pcpu_rt->rt6i_flags |= RTF_PCPU;
	return pcpu_rt;
}

1388
/* It should be called with rcu_read_lock() acquired */
1389
static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
M
Martin KaFai Lau 已提交
1390
{
1391
	struct rt6_info *pcpu_rt;
M
Martin KaFai Lau 已提交
1392

1393
	pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);
M
Martin KaFai Lau 已提交
1394

1395 1396 1397
	return pcpu_rt;
}

1398
static struct rt6_info *rt6_make_pcpu_route(struct net *net,
1399
					    const struct fib6_result *res)
1400 1401
{
	struct rt6_info *pcpu_rt, *prev, **p;
M
Martin KaFai Lau 已提交
1402

1403
	pcpu_rt = ip6_rt_pcpu_alloc(res);
1404 1405
	if (!pcpu_rt)
		return NULL;
M
Martin KaFai Lau 已提交
1406

1407
	p = this_cpu_ptr(res->nh->rt6i_pcpu);
1408
	prev = cmpxchg(p, NULL, pcpu_rt);
1409
	BUG_ON(prev);
1410

E
Eric Dumazet 已提交
1411 1412 1413 1414 1415 1416 1417
	if (res->f6i->fib6_destroying) {
		struct fib6_info *from;

		from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
		fib6_info_release(from);
	}

M
Martin KaFai Lau 已提交
1418 1419 1420
	return pcpu_rt;
}

1421 1422 1423 1424 1425 1426 1427 1428 1429 1430
/* exception hash table implementation
 */
static DEFINE_SPINLOCK(rt6_exception_lock);

/* Remove rt6_ex from hash table and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
				 struct rt6_exception *rt6_ex)
{
1431
	struct fib6_info *from;
1432
	struct net *net;
W
Wei Wang 已提交
1433

1434 1435
	if (!bucket || !rt6_ex)
		return;
1436 1437

	net = dev_net(rt6_ex->rt6i->dst.dev);
1438 1439 1440 1441 1442
	net->ipv6.rt6_stats->fib_rt_cache--;

	/* purge completely the exception to allow releasing the held resources:
	 * some [sk] cache may keep the dst around for unlimited time
	 */
1443
	from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
1444 1445 1446
	fib6_info_release(from);
	dst_dev_put(&rt6_ex->rt6i->dst);

1447
	hlist_del_rcu(&rt6_ex->hlist);
1448
	dst_release(&rt6_ex->rt6i->dst);
1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554
	kfree_rcu(rt6_ex, rcu);
	WARN_ON_ONCE(!bucket->depth);
	bucket->depth--;
}

/* Remove oldest rt6_ex in bucket and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
{
	struct rt6_exception *rt6_ex, *oldest = NULL;

	if (!bucket)
		return;

	/* oldest == entry with the earliest timestamp in this chain */
	hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
		if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
			oldest = rt6_ex;
	}
	/* rt6_remove_exception() tolerates a NULL entry (empty chain) */
	rt6_remove_exception(bucket, oldest);
}

/* Hash (dst[, src]) into an exception bucket index using jhash with a
 * boot-time random seed; src participates only with CONFIG_IPV6_SUBTREES.
 * Returns a value in [0, 2^FIB6_EXCEPTION_BUCKET_SIZE_SHIFT).
 */
static u32 rt6_exception_hash(const struct in6_addr *dst,
			      const struct in6_addr *src)
{
	static u32 seed __read_mostly;
	u32 val;

	net_get_random_once(&seed, sizeof(seed));
	val = jhash(dst, sizeof(*dst), seed);

#ifdef CONFIG_IPV6_SUBTREES
	if (src)
		val = jhash(src, sizeof(*src), val);
#endif
	return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
}

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rt6_exception_lock
 */
static struct rt6_exception *
__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
			      const struct in6_addr *daddr,
			      const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	if (!(*bucket) || !daddr)
		return NULL;

	/* advance *bucket to the chain selected by the hash */
	hval = rt6_exception_hash(daddr, saddr);
	*bucket += hval;

	hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rcu_read_lock()
 */
static struct rt6_exception *
__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
			 const struct in6_addr *daddr,
			 const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!(*bucket) || !daddr)
		return NULL;

	/* advance *bucket to the chain selected by the hash */
	hval = rt6_exception_hash(daddr, saddr);
	*bucket += hval;

	hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}

1555
static unsigned int fib6_mtu(const struct fib6_result *res)
1556
{
1557
	const struct fib6_nh *nh = res->nh;
1558 1559
	unsigned int mtu;

1560 1561
	if (res->f6i->fib6_pmtu) {
		mtu = res->f6i->fib6_pmtu;
D
David Ahern 已提交
1562
	} else {
1563
		struct net_device *dev = nh->fib_nh_dev;
D
David Ahern 已提交
1564 1565 1566 1567 1568 1569 1570 1571
		struct inet6_dev *idev;

		rcu_read_lock();
		idev = __in6_dev_get(dev);
		mtu = idev->cnf.mtu6;
		rcu_read_unlock();
	}

1572 1573
	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

1574
	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
1575 1576
}

1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629
/* Low bit of the exception bucket pointer is used as a "flushed" tag. */
#define FIB6_EXCEPTION_BUCKET_FLUSHED  0x1UL

/* used when the flushed bit is not relevant, only access to the bucket
 * (ie., all bucket users except rt6_insert_exception);
 *
 * called under rcu lock; sometimes called with rt6_exception_lock held
 */
static
struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh,
						       spinlock_t *lock)
{
	struct rt6_exception_bucket *bucket;

	if (lock)
		bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
						   lockdep_is_held(lock));
	else
		bucket = rcu_dereference(nh->rt6i_exception_bucket);

	/* remove bucket flushed bit if set */
	if (bucket) {
		unsigned long p = (unsigned long)bucket;

		p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED;
		bucket = (struct rt6_exception_bucket *)p;
	}

	return bucket;
}

/* Test the "flushed" tag bit carried in the bucket pointer. */
static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)
{
	unsigned long p = (unsigned long)bucket;

	return !!(p & FIB6_EXCEPTION_BUCKET_FLUSHED);
}

/* called with rt6_exception_lock held */
static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh,
					      spinlock_t *lock)
{
	struct rt6_exception_bucket *bucket;
	unsigned long p;

	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
					   lockdep_is_held(lock));

	/* tag the stored pointer so rt6_insert_exception() refuses new
	 * entries after a full flush
	 */
	p = (unsigned long)bucket;
	p |= FIB6_EXCEPTION_BUCKET_FLUSHED;
	bucket = (struct rt6_exception_bucket *)p;
	rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
}

1630
static int rt6_insert_exception(struct rt6_info *nrt,
1631
				const struct fib6_result *res)
1632
{
1633
	struct net *net = dev_net(nrt->dst.dev);
1634
	struct rt6_exception_bucket *bucket;
1635
	struct fib6_info *f6i = res->f6i;
1636 1637
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
1638
	struct fib6_nh *nh = res->nh;
1639 1640 1641 1642
	int err = 0;

	spin_lock_bh(&rt6_exception_lock);

1643 1644
	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
					  lockdep_is_held(&rt6_exception_lock));
1645 1646 1647 1648 1649 1650 1651
	if (!bucket) {
		bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
				 GFP_ATOMIC);
		if (!bucket) {
			err = -ENOMEM;
			goto out;
		}
1652 1653 1654 1655
		rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
	} else if (fib6_nh_excptn_bucket_flushed(bucket)) {
		err = -EINVAL;
		goto out;
1656 1657 1658
	}

#ifdef CONFIG_IPV6_SUBTREES
1659
	/* fib6_src.plen != 0 indicates f6i is in subtree
1660
	 * and exception table is indexed by a hash of
1661
	 * both fib6_dst and fib6_src.
1662
	 * Otherwise, the exception table is indexed by
1663
	 * a hash of only fib6_dst.
1664
	 */
1665
	if (f6i->fib6_src.plen)
1666 1667
		src_key = &nrt->rt6i_src.addr;
#endif
1668
	/* rt6_mtu_change() might lower mtu on f6i.
1669
	 * Only insert this exception route if its mtu
1670
	 * is less than f6i's mtu value.
1671
	 */
1672
	if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {
1673 1674 1675
		err = -EINVAL;
		goto out;
	}
1676

1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690
	rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex)
		rt6_remove_exception(bucket, rt6_ex);

	rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
	if (!rt6_ex) {
		err = -ENOMEM;
		goto out;
	}
	rt6_ex->rt6i = nrt;
	rt6_ex->stamp = jiffies;
	hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
	bucket->depth++;
W
Wei Wang 已提交
1691
	net->ipv6.rt6_stats->fib_rt_cache++;
1692 1693 1694 1695 1696 1697 1698 1699

	if (bucket->depth > FIB6_MAX_DEPTH)
		rt6_exception_remove_oldest(bucket);

out:
	spin_unlock_bh(&rt6_exception_lock);

	/* Update fn->fn_sernum to invalidate all cached dst */
1700
	if (!err) {
1701 1702 1703
		spin_lock_bh(&f6i->fib6_table->tb6_lock);
		fib6_update_sernum(net, f6i);
		spin_unlock_bh(&f6i->fib6_table->tb6_lock);
1704 1705
		fib6_force_start_gc(net);
	}
1706 1707 1708 1709

	return err;
}

D
David Ahern 已提交
1710
static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from)
1711 1712 1713 1714 1715 1716 1717 1718
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	spin_lock_bh(&rt6_exception_lock);

1719
	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1720 1721 1722
	if (!bucket)
		goto out;

1723 1724 1725 1726
	/* Prevent rt6_insert_exception() to recreate the bucket list */
	if (!from)
		fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock);

1727
	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1728 1729 1730 1731 1732 1733
		hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {
			if (!from ||
			    rcu_access_pointer(rt6_ex->rt6i->from) == from)
				rt6_remove_exception(bucket, rt6_ex);
		}
		WARN_ON_ONCE(!from && bucket->depth);
1734 1735 1736 1737 1738 1739
		bucket++;
	}
out:
	spin_unlock_bh(&rt6_exception_lock);
}

1740 1741 1742 1743 1744 1745 1746 1747 1748
/* nexthop_for_each_fib6_nh() trampoline: flush @nh's exceptions that
 * originate from the fib6_info passed via @arg. Always returns 0 so
 * iteration continues over all nexthops.
 */
static int rt6_nh_flush_exceptions(struct fib6_nh *nh, void *arg)
{
	struct fib6_info *f6i = arg;

	fib6_nh_flush_exceptions(nh, f6i);

	return 0;
}

D
David Ahern 已提交
1749 1750
void rt6_flush_exceptions(struct fib6_info *f6i)
{
1751 1752 1753 1754 1755
	if (f6i->nh)
		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions,
					 f6i);
	else
		fib6_nh_flush_exceptions(f6i->fib6_nh, f6i);
D
David Ahern 已提交
1756 1757
}

1758 1759 1760
/* Find cached rt in the hash table inside passed in rt
 * Caller has to hold rcu_read_lock()
 */
1761
static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
1762 1763
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
1764
{
1765
	const struct in6_addr *src_key = NULL;
1766 1767
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
1768
	struct rt6_info *ret = NULL;
1769 1770

#ifdef CONFIG_IPV6_SUBTREES
1771
	/* fib6i_src.plen != 0 indicates f6i is in subtree
1772
	 * and exception table is indexed by a hash of
1773
	 * both fib6_dst and fib6_src.
1774 1775 1776 1777 1778 1779 1780
	 * However, the src addr used to create the hash
	 * might not be exactly the passed in saddr which
	 * is a /128 addr from the flow.
	 * So we need to use f6i->fib6_src to redo lookup
	 * if the passed in saddr does not find anything.
	 * (See the logic in ip6_rt_cache_alloc() on how
	 * rt->rt6i_src is updated.)
1781
	 */
1782
	if (res->f6i->fib6_src.plen)
1783
		src_key = saddr;
1784
find_ex:
1785
#endif
1786
	bucket = fib6_nh_get_excptn_bucket(res->nh, NULL);
1787 1788 1789
	rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);

	if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
1790
		ret = rt6_ex->rt6i;
1791

1792 1793 1794 1795 1796 1797 1798 1799
#ifdef CONFIG_IPV6_SUBTREES
	/* Use fib6_src as src_key and redo lookup */
	if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
		src_key = &res->f6i->fib6_src.addr;
		goto find_ex;
	}
#endif

1800
	return ret;
1801 1802 1803
}

/* Remove the passed in cached rt from the hash table that contains it */
1804
static int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen,
D
David Ahern 已提交
1805
				    const struct rt6_info *rt)
1806
{
D
David Ahern 已提交
1807
	const struct in6_addr *src_key = NULL;
1808 1809 1810 1811
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	int err;

1812
	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
1813 1814 1815
		return -ENOENT;

	spin_lock_bh(&rt6_exception_lock);
1816 1817
	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);

1818
#ifdef CONFIG_IPV6_SUBTREES
1819 1820 1821
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
1822 1823 1824
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
D
David Ahern 已提交
1825
	if (plen)
1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_spinlock(&bucket,
					       &rt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex) {
		rt6_remove_exception(bucket, rt6_ex);
		err = 0;
	} else {
		err = -ENOENT;
	}

	spin_unlock_bh(&rt6_exception_lock);
	return err;
}

1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858
/* Argument bundle for rt6_nh_remove_exception_rt(). */
struct fib6_nh_excptn_arg {
	struct rt6_info	*rt;	/* cached route to remove */
	int		plen;	/* source prefix len of the origin route */
};

/* nexthop_for_each_fib6_nh() trampoline: returns 1 (stop iteration)
 * when the exception was found and removed on this nexthop, 0 to
 * keep searching the remaining nexthops.
 */
static int rt6_nh_remove_exception_rt(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_excptn_arg *arg = _arg;
	int err;

	err = fib6_nh_remove_exception(nh, arg->plen, arg->rt);
	if (err == 0)
		return 1;

	return 0;
}

D
David Ahern 已提交
1859 1860 1861 1862 1863
static int rt6_remove_exception_rt(struct rt6_info *rt)
{
	struct fib6_info *from;

	from = rcu_dereference(rt->from);
1864
	if (!from || !(rt->rt6i_flags & RTF_CACHE))
D
David Ahern 已提交
1865 1866
		return -EINVAL;

1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880
	if (from->nh) {
		struct fib6_nh_excptn_arg arg = {
			.rt = rt,
			.plen = from->fib6_src.plen
		};
		int rc;

		/* rc = 1 means an entry was found */
		rc = nexthop_for_each_fib6_nh(from->nh,
					      rt6_nh_remove_exception_rt,
					      &arg);
		return rc ? 0 : -ENOENT;
	}

1881
	return fib6_nh_remove_exception(from->fib6_nh,
1882
					from->fib6_src.plen, rt);
D
David Ahern 已提交
1883 1884
}

1885 1886 1887
/* Find rt6_ex which contains the passed in rt cache and
 * refresh its stamp
 */
1888
static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen,
D
David Ahern 已提交
1889
				     const struct rt6_info *rt)
1890
{
D
David Ahern 已提交
1891
	const struct in6_addr *src_key = NULL;
1892 1893
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
1894

1895
	bucket = fib6_nh_get_excptn_bucket(nh, NULL);
1896
#ifdef CONFIG_IPV6_SUBTREES
1897 1898 1899
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
1900 1901 1902
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
D
David Ahern 已提交
1903
	if (plen)
1904 1905
		src_key = &rt->rt6i_src.addr;
#endif
1906
	rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);
1907 1908
	if (rt6_ex)
		rt6_ex->stamp = jiffies;
D
David Ahern 已提交
1909 1910
}

1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933
/* Argument bundle for fib6_nh_find_match(). */
struct fib6_nh_match_arg {
	const struct net_device *dev;	/* device to match */
	const struct in6_addr	*gw;	/* gateway to match, may be NULL */
	struct fib6_nh		*match;	/* out: matching nexthop, if any */
};

/* determine if fib6_nh has given device and gateway */
static int fib6_nh_find_match(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_match_arg *arg = _arg;

	/* device must match, and gateway presence/value must agree */
	if (arg->dev != nh->fib_nh_dev ||
	    (arg->gw && !nh->fib_nh_gw_family) ||
	    (!arg->gw && nh->fib_nh_gw_family) ||
	    (arg->gw && !ipv6_addr_equal(arg->gw, &nh->fib_nh_gw6)))
		return 0;

	arg->match = nh;

	/* found a match, break the loop */
	return 1;
}

D
David Ahern 已提交
1934 1935 1936
static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
{
	struct fib6_info *from;
1937
	struct fib6_nh *fib6_nh;
1938

D
David Ahern 已提交
1939 1940 1941 1942 1943 1944
	rcu_read_lock();

	from = rcu_dereference(rt->from);
	if (!from || !(rt->rt6i_flags & RTF_CACHE))
		goto unlock;

1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959
	if (from->nh) {
		struct fib6_nh_match_arg arg = {
			.dev = rt->dst.dev,
			.gw = &rt->rt6i_gateway,
		};

		nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg);

		if (!arg.match)
			return;
		fib6_nh = arg.match;
	} else {
		fib6_nh = from->fib6_nh;
	}
	fib6_nh_update_exception(fib6_nh, from->fib6_src.plen, rt);
1960
unlock:
1961 1962 1963
	rcu_read_unlock();
}

1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986
/* Decide whether a device MTU change to @mtu may be propagated to the
 * cached route @rt (see the PMTU reasoning below).
 */
static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
					 struct rt6_info *rt, int mtu)
{
	/* If the new MTU is lower than the route PMTU, this new MTU will be the
	 * lowest MTU in the path: always allow updating the route PMTU to
	 * reflect PMTU decreases.
	 *
	 * If the new MTU is higher, and the route PMTU is equal to the local
	 * MTU, this means the old MTU is the lowest in the path, so allow
	 * updating it: if other nodes now have lower MTUs, PMTU discovery will
	 * handle this.
	 */

	if (dst_mtu(&rt->dst) >= mtu)
		return true;

	if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
		return true;

	return false;
}

static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
1987
				       const struct fib6_nh *nh, int mtu)
1988 1989 1990 1991 1992
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	int i;

1993
	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1994 1995 1996 1997 1998 1999 2000 2001
	if (!bucket)
		return;

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
			struct rt6_info *entry = rt6_ex->rt6i;

			/* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
2002
			 * route), the metrics of its rt->from have already
2003 2004
			 * been updated.
			 */
2005
			if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
2006
			    rt6_mtu_change_route_allowed(idev, entry, mtu))
2007
				dst_metric_set(&entry->dst, RTAX_MTU, mtu);
2008
		}
2009
		bucket++;
2010 2011 2012
	}
}

2013 2014
#define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)

2015 2016
static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh,
					    const struct in6_addr *gateway)
2017 2018 2019 2020 2021 2022
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

2023
	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
2024 2025 2026
		return;

	spin_lock_bh(&rt6_exception_lock);
2027
	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047
	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				struct rt6_info *entry = rt6_ex->rt6i;

				if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
				    RTF_CACHE_GATEWAY &&
				    ipv6_addr_equal(gateway,
						    &entry->rt6i_gateway)) {
					rt6_remove_exception(bucket, rt6_ex);
				}
			}
			bucket++;
		}
	}

	spin_unlock_bh(&rt6_exception_lock);
}

2048 2049 2050 2051 2052 2053 2054
static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
				      struct rt6_exception *rt6_ex,
				      struct fib6_gc_args *gc_args,
				      unsigned long now)
{
	struct rt6_info *rt = rt6_ex->rt6i;

2055 2056 2057 2058 2059 2060
	/* we are pruning and obsoleting aged-out and non gateway exceptions
	 * even if others have still references to them, so that on next
	 * dst_check() such references can be dropped.
	 * EXPIRES exceptions - e.g. pmtu-generated ones are pruned when
	 * expired, independently from their aging, as per RFC 8201 section 4
	 */
W
Wei Wang 已提交
2061 2062 2063 2064 2065 2066 2067 2068
	if (!(rt->rt6i_flags & RTF_EXPIRES)) {
		if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
			RT6_TRACE("aging clone %p\n", rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	} else if (time_after(jiffies, rt->dst.expires)) {
		RT6_TRACE("purging expired route %p\n", rt);
2069 2070
		rt6_remove_exception(bucket, rt6_ex);
		return;
W
Wei Wang 已提交
2071 2072 2073
	}

	if (rt->rt6i_flags & RTF_GATEWAY) {
2074 2075 2076
		struct neighbour *neigh;
		__u8 neigh_flags = 0;

2077 2078
		neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
		if (neigh)
2079
			neigh_flags = neigh->flags;
2080

2081 2082 2083 2084 2085 2086 2087
		if (!(neigh_flags & NTF_ROUTER)) {
			RT6_TRACE("purging route %p via non-router but gateway\n",
				  rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	}
W
Wei Wang 已提交
2088

2089 2090 2091
	gc_args->more++;
}

2092
/* Walk every exception bucket hanging off @nh and let
 * rt6_age_examine_exception() prune or keep each entry.
 *
 * Locking order matters: rcu_read_lock_bh() first (needed by the
 * neighbour lookup inside the examine helper), then rt6_exception_lock
 * to serialize against exception insert/remove.
 */
static void fib6_nh_age_exceptions(const struct fib6_nh *nh,
				   struct fib6_gc_args *gc_args,
				   unsigned long now)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	/* Fast path: no exception bucket was ever allocated for this nh. */
	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
		return;

	rcu_read_lock_bh();
	spin_lock(&rt6_exception_lock);
	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			/* _safe variant: entries may be removed while walking */
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				rt6_age_examine_exception(bucket, rt6_ex,
							  gc_args, now);
			}
			bucket++;
		}
	}
	spin_unlock(&rt6_exception_lock);
	rcu_read_unlock_bh();
}

2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133
/* Carries the GC arguments through the nexthop_for_each_fib6_nh()
 * callback, which only allows a single void * of context.
 */
struct fib6_nh_age_excptn_arg {
	struct fib6_gc_args	*gc_args;
	unsigned long		now;
};

/* Per-fib6_nh callback for nexthop objects; always returns 0 so the
 * walk visits every nexthop.
 */
static int rt6_nh_age_exceptions(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_age_excptn_arg *ctx = _arg;

	fib6_nh_age_exceptions(nh, ctx->gc_args, ctx->now);
	return 0;
}

2134
void rt6_age_exceptions(struct fib6_info *f6i,
D
David Ahern 已提交
2135 2136 2137
			struct fib6_gc_args *gc_args,
			unsigned long now)
{
2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148
	if (f6i->nh) {
		struct fib6_nh_age_excptn_arg arg = {
			.gc_args = gc_args,
			.now = now
		};

		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_age_exceptions,
					 &arg);
	} else {
		fib6_nh_age_exceptions(f6i->fib6_nh, gc_args, now);
	}
D
David Ahern 已提交
2149 2150
}

2151
/* must be called with rcu lock held */
/* Core FIB lookup in one table: find the best fib6_info for @fl6 and
 * fill it into @res.  On a miss the search backtracks up the trie; if
 * even that fails and RT6_LOOKUP_F_REACHABLE was set, the lookup is
 * retried once from the original node without the reachability
 * requirement.  Always returns 0; "no route" is signalled by
 * res->f6i == net->ipv6.fib6_null_entry.
 */
int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
		      struct flowi6 *fl6, struct fib6_result *res, int strict)
{
	struct fib6_node *fn, *saved_fn;

	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
	saved_fn = fn;	/* remembered for the REACHABLE-relaxed retry */

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		oif = 0;

redo_rt6_select:
	rt6_select(net, fn, oif, res, strict);
	if (res->f6i == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto redo_rt6_select;
		else if (strict & RT6_LOOKUP_F_REACHABLE) {
			/* also consider unreachable route */
			strict &= ~RT6_LOOKUP_F_REACHABLE;
			fn = saved_fn;
			goto redo_rt6_select;
		}
	}

	trace_fib6_table_lookup(net, res, table, fl6);

	return 0;
}

/* Policy-routing lookup entry point: resolve @fl6 in @table and return
 * a dst-capable rt6_info.  Three sources, in order: the per-route
 * exception cache, a freshly allocated uncached RTF_CACHE clone (only
 * for FLOWI_FLAG_KNOWN_NH without a gateway), or the per-CPU route
 * copy.  Unless RT6_LOOKUP_F_DST_NOREF is set, the returned dst carries
 * a reference the caller must release.
 */
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
			       int oif, struct flowi6 *fl6,
			       const struct sk_buff *skb, int flags)
{
	struct fib6_result res = {};
	struct rt6_info *rt = NULL;
	int strict = 0;

	/* DST_NOREF callers must already be inside an RCU section,
	 * since the result is only RCU-protected then.
	 */
	WARN_ON_ONCE((flags & RT6_LOOKUP_F_DST_NOREF) &&
		     !rcu_read_lock_held());

	strict |= flags & RT6_LOOKUP_F_IFACE;
	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
	if (net->ipv6.devconf_all->forwarding == 0)
		strict |= RT6_LOOKUP_F_REACHABLE;

	rcu_read_lock();

	fib6_table_lookup(net, table, oif, fl6, &res, strict);
	if (res.f6i == net->ipv6.fib6_null_entry)
		goto out;

	fib6_select_path(net, &res, fl6, oif, false, skb, strict);

	/* Search through exception table */
	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
	if (rt) {
		goto out;
	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
			    !res.nh->fib_nh_gw_family)) {
		/* Create a RTF_CACHE clone which will not be
		 * owned by the fib6 tree.  It is for the special case where
		 * the daddr in the skb during the neighbor look-up is different
		 * from the fl6->daddr used to look-up route here.
		 */
		rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);

		if (rt) {
			/* 1 refcnt is taken during ip6_rt_cache_alloc().
			 * As rt6_uncached_list_add() does not consume refcnt,
			 * this refcnt is always returned to the caller even
			 * if caller sets RT6_LOOKUP_F_DST_NOREF flag.
			 */
			rt6_uncached_list_add(rt);
			atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
			rcu_read_unlock();

			return rt;
		}
	} else {
		/* Get a percpu copy */
		local_bh_disable();
		rt = rt6_get_pcpu_route(&res);

		if (!rt)
			rt = rt6_make_pcpu_route(net, &res);

		local_bh_enable();
	}
out:
	if (!rt)
		rt = net->ipv6.ip6_null_entry;
	if (!(flags & RT6_LOOKUP_F_DST_NOREF))
		ip6_hold_safe(net, &rt);
	rcu_read_unlock();

	return rt;
}
EXPORT_SYMBOL_GPL(ip6_pol_route);
L
Linus Torvalds 已提交
2251

D
David Ahern 已提交
2252 2253 2254 2255 2256
static struct rt6_info *ip6_pol_route_input(struct net *net,
					    struct fib6_table *table,
					    struct flowi6 *fl6,
					    const struct sk_buff *skb,
					    int flags)
2257
{
D
David Ahern 已提交
2258
	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
2259 2260
}

2261 2262
struct dst_entry *ip6_route_input_lookup(struct net *net,
					 struct net_device *dev,
D
David Ahern 已提交
2263 2264 2265
					 struct flowi6 *fl6,
					 const struct sk_buff *skb,
					 int flags)
2266 2267 2268 2269
{
	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
		flags |= RT6_LOOKUP_F_IFACE;

D
David Ahern 已提交
2270
	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
2271
}
2272
EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
2273

2274
/* Fill @keys (L3 fields only) for multipath hashing of @skb.  For ICMPv6
 * error messages the hash must be computed over the *embedded* offending
 * packet's header, so that errors follow the same path as the flow that
 * triggered them; in that case any pre-dissected @flkeys are ignored.
 */
static void ip6_multipath_l3_keys(const struct sk_buff *skb,
				  struct flow_keys *keys,
				  struct flow_keys *flkeys)
{
	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
	const struct ipv6hdr *key_iph = outer_iph;
	struct flow_keys *_flkeys = flkeys;
	const struct ipv6hdr *inner_iph;
	const struct icmp6hdr *icmph;
	struct ipv6hdr _inner_iph;
	struct icmp6hdr _icmph;

	if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
		goto out;

	icmph = skb_header_pointer(skb, skb_transport_offset(skb),
				   sizeof(_icmph), &_icmph);
	if (!icmph)
		goto out;

	/* Only ICMPv6 *errors* embed the original packet. */
	if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
	    icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
	    icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
	    icmph->icmp6_type != ICMPV6_PARAMPROB)
		goto out;

	inner_iph = skb_header_pointer(skb,
				       skb_transport_offset(skb) + sizeof(*icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	/* Hash on the embedded header; cached flkeys refer to the outer
	 * packet and are no longer valid.
	 */
	key_iph = inner_iph;
	_flkeys = NULL;
out:
	if (_flkeys) {
		keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
		keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
		keys->tags.flow_label = _flkeys->tags.flow_label;
		keys->basic.ip_proto = _flkeys->basic.ip_proto;
	} else {
		keys->addrs.v6addrs.src = key_iph->saddr;
		keys->addrs.v6addrs.dst = key_iph->daddr;
		keys->tags.flow_label = ip6_flowlabel(key_iph);
		keys->basic.ip_proto = key_iph->nexthdr;
	}
}

/* if skb is set it will be used and fl6 can be NULL */
2323 2324
u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
		       const struct sk_buff *skb, struct flow_keys *flkeys)
2325 2326
{
	struct flow_keys hash_keys;
2327
	u32 mhash;
2328

2329
	switch (ip6_multipath_hash_policy(net)) {
2330 2331 2332 2333 2334 2335 2336 2337
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (skb) {
			ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
		} else {
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
2338
			hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		break;
	case 1:
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;

			memset(&hash_keys, 0, sizeof(hash_keys));

                        if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, flag);
				flkeys = &keys;
			}
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
			hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
			hash_keys.ports.src = flkeys->ports.src;
			hash_keys.ports.dst = flkeys->ports.dst;
			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.ports.src = fl6->fl6_sport;
			hash_keys.ports.dst = fl6->fl6_dport;
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		break;
2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408
	case 2:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (skb) {
			struct flow_keys keys;

			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, 0);
				flkeys = &keys;
			}

			/* Inner can be v4 or v6 */
			if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
				hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
				hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
			} else if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
				hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
				hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
				hash_keys.tags.flow_label = flkeys->tags.flow_label;
				hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
			} else {
				/* Same as case 0 */
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
				ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
			}
		} else {
			/* Same as case 0 */
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		break;
2409
	}
2410
	mhash = flow_hash_from_keys(&hash_keys);
2411

2412
	return mhash >> 1;
2413 2414
}

2415
/* Called with rcu held */
/* Attach a route to an incoming skb: build a flowi6 from the packet
 * headers (plus tunnel metadata and early-dissected flow keys if any),
 * compute a multipath hash for ICMPv6, and set the resulting dst on the
 * skb without taking a reference (noref; valid under the RCU section).
 */
void ip6_route_input(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int flags = RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_DST_NOREF;
	struct ip_tunnel_info *tun_info;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
	};
	struct flow_keys *flkeys = NULL, _flkeys;

	/* RX tunnel metadata keys the lookup to the tunnel id. */
	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;

	if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
		flkeys = &_flkeys;

	/* ICMPv6 errors must hash like the flow that triggered them. */
	if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
		fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
	skb_dst_drop(skb);
	skb_dst_set_noref(skb, ip6_route_input_lookup(net, skb->dev,
						      &fl6, skb, flags));
}

D
David Ahern 已提交
2446 2447 2448 2449 2450
static struct rt6_info *ip6_pol_route_output(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
L
Linus Torvalds 已提交
2451
{
D
David Ahern 已提交
2452
	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
T
Thomas Graf 已提交
2453 2454
}

2455 2456 2457
/* Output route lookup that does NOT take a reference on the returned
 * dst (RT6_LOOKUP_F_DST_NOREF is forced); the caller must hold RCU or
 * wrap this in ip6_route_output_flags().  Link-local/multicast
 * destinations are first tried against an L3 master device scope.
 */
struct dst_entry *ip6_route_output_flags_noref(struct net *net,
					       const struct sock *sk,
					       struct flowi6 *fl6, int flags)
{
	bool any_src;

	if (ipv6_addr_type(&fl6->daddr) &
	    (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
		struct dst_entry *dst;

		/* This function does not take refcnt on the dst */
		dst = l3mdev_link_scope_lookup(net, fl6);
		if (dst)
			return dst;
	}

	fl6->flowi6_iif = LOOPBACK_IFINDEX;

	flags |= RT6_LOOKUP_F_DST_NOREF;
	any_src = ipv6_addr_any(&fl6->saddr);
	/* Strict interface match for bound sockets, strict destinations,
	 * or an explicit oif with an as-yet-unknown source address.
	 */
	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
	    (fl6->flowi6_oif && any_src))
		flags |= RT6_LOOKUP_F_IFACE;

	if (!any_src)
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	else if (sk)
		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

	return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags_noref);

/* Reference-taking wrapper around ip6_route_output_flags_noref().
 * Runs the noref lookup under RCU and then acquires a refcount on the
 * result; if the dst is already being destroyed, falls back to the
 * (permanent) null entry.
 *
 * Fix vs. original: the body was indented with spaces instead of tabs,
 * violating the kernel coding style used by the rest of this file.
 */
struct dst_entry *ip6_route_output_flags(struct net *net,
					 const struct sock *sk,
					 struct flowi6 *fl6,
					 int flags)
{
	struct dst_entry *dst;
	struct rt6_info *rt6;

	rcu_read_lock();
	dst = ip6_route_output_flags_noref(net, sk, fl6, flags);
	rt6 = (struct rt6_info *)dst;
	/* For dst cached in uncached_list, refcnt is already taken. */
	if (list_empty(&rt6->rt6i_uncached) && !dst_hold_safe(dst)) {
		dst = &net->ipv6.ip6_null_entry->dst;
		dst_hold(dst);
	}
	rcu_read_unlock();

	return dst;
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags);
L
Linus Torvalds 已提交
2509

2510
/* Replace @dst_orig with a "blackhole" copy: a dst bound to the
 * loopback device that silently discards everything sent through it,
 * while preserving the original's metrics, gateway and keys.  Consumes
 * the reference on @dst_orig and returns the new dst, or
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
	struct net_device *loopback_dev = net->loopback_dev;
	struct dst_entry *new = NULL;

	rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
		       DST_OBSOLETE_DEAD, 0);
	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);

		new = &rt->dst;
		new->__use = 1;
		/* Both directions discard: this route goes nowhere. */
		new->input = dst_discard;
		new->output = dst_discard_out;

		dst_copy_metrics(new, &ort->dst);

		rt->rt6i_idev = in6_dev_get(loopback_dev);
		rt->rt6i_gateway = ort->rt6i_gateway;
		/* RTF_PCPU must not leak into the copy. */
		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;

		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
	}

	dst_release(dst_orig);
	return new ? new : ERR_PTR(-ENOMEM);
}

L
Linus Torvalds 已提交
2543 2544 2545 2546
/*
 *	Destination cache support functions
 */

2547
/* A fib6_info is still usable iff its tree cookie can be read safely,
 * matches the caller's cached @cookie, and the entry has not expired.
 */
static bool fib6_check(struct fib6_info *f6i, u32 cookie)
{
	u32 rt_cookie = 0;

	return fib6_get_cookie_safe(f6i, &rt_cookie) &&
	       rt_cookie == cookie &&
	       !fib6_check_expired(f6i);
}

2560 2561 2562
/* Validate a cached rt6_info against its originating fib6_info.
 * Returns the dst if still valid, NULL if the caller must re-lookup.
 * Note: with a NULL @from the cookie stays 0, so validation only
 * succeeds when the caller's cookie is 0 as well.
 */
static struct dst_entry *rt6_check(struct rt6_info *rt,
				   struct fib6_info *from,
				   u32 cookie)
{
	u32 fib_cookie = 0;

	if (from && !fib6_get_cookie_safe(from, &fib_cookie))
		return NULL;

	if (fib_cookie != cookie)
		return NULL;

	if (rt6_check_expired(rt))
		return NULL;

	return &rt->dst;
}

2576 2577 2578
/* Validation variant for pcpu/uncached routes: the rt itself must not
 * be expired, must still be in the force-check obsolete state, and its
 * parent fib6_info must pass fib6_check().
 */
static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
					    struct fib6_info *from,
					    u32 cookie)
{
	if (__rt6_check_expired(rt))
		return NULL;

	if (rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK)
		return NULL;

	if (!fib6_check(from, cookie))
		return NULL;

	return &rt->dst;
}

L
Linus Torvalds 已提交
2588 2589
/* dst_ops->check handler: revalidate a cached dst.  Returns the dst if
 * still valid, NULL to force the caller to redo the route lookup.
 */
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct dst_entry *dst_ret;
	struct fib6_info *from;
	struct rt6_info *rt;

	rt = container_of(dst, struct rt6_info, dst);

	rcu_read_lock();

	/* All IPV6 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 */

	from = rcu_dereference(rt->from);

	/* Per-cpu copies and uncached-list entries validate through the
	 * parent fib6_info; plain cached routes use the direct check.
	 */
	if (from && (rt->rt6i_flags & RTF_PCPU ||
	    unlikely(!list_empty(&rt->rt6i_uncached))))
		dst_ret = rt6_dst_from_check(rt, from, cookie);
	else
		dst_ret = rt6_check(rt, from, cookie);

	rcu_read_unlock();

	return dst_ret;
}

/* dst_ops->negative_advice handler: the caller got negative feedback on
 * this dst.  Cached exception routes are only dropped if expired; any
 * other route is released so the next use re-resolves.  Returns the
 * (possibly NULL) dst the socket should keep.
 */
static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *) dst;

	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			rcu_read_lock();
			if (rt6_check_expired(rt)) {
				/* removal drops the tree's reference */
				rt6_remove_exception_rt(rt);
				dst = NULL;
			}
			rcu_read_unlock();
		} else {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

/* dst_ops->link_failure handler: report unreachability to the sender
 * and invalidate the route that failed.  Cached exceptions are removed
 * outright; for a default route, the fib6 node's serial number is
 * poisoned so cached dsts fail their next ip6_dst_check().
 */
static void ip6_link_failure(struct sk_buff *skb)
{
	struct rt6_info *rt;

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);

	rt = (struct rt6_info *) skb_dst(skb);
	if (rt) {
		rcu_read_lock();
		if (rt->rt6i_flags & RTF_CACHE) {
			rt6_remove_exception_rt(rt);
		} else {
			struct fib6_info *from;
			struct fib6_node *fn;

			from = rcu_dereference(rt->from);
			if (from) {
				fn = rcu_dereference(from->fib6_node);
				if (fn && (rt->rt6i_flags & RTF_DEFAULT))
					/* invalidate all cookies for this node */
					fn->fn_sernum = -1;
			}
		}
		rcu_read_unlock();
	}
}

2662 2663
/* Arm (or re-arm) the expiry timer of @rt0 to @timeout jiffies from now
 * and mark it RTF_EXPIRES.  If the route had no expiry yet, first
 * inherit the parent fib6_info's expires value so dst_set_expires()
 * starts from a meaningful baseline.
 */
static void rt6_update_expires(struct rt6_info *rt0, int timeout)
{
	if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
		struct fib6_info *from;

		rcu_read_lock();
		from = rcu_dereference(rt0->from);
		if (from)
			rt0->dst.expires = from->expires;
		rcu_read_unlock();
	}

	dst_set_expires(&rt0->dst, timeout);
	rt0->rt6i_flags |= RTF_EXPIRES;
}

2678 2679 2680 2681
/* Record a new path MTU on @rt: set the metric, flag the route as
 * modified, and start the netns-configured PMTU expiry timer.
 */
static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
{
	struct net *net = dev_net(rt->dst.dev);

	rt->rt6i_flags |= RTF_MODIFIED;
	dst_metric_set(&rt->dst, RTAX_MTU, mtu);
	rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
}

2687 2688 2689
static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
{
	return !(rt->rt6i_flags & RTF_CACHE) &&
2690
		(rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from));
2691 2692
}

2693 2694
/* Apply a path-MTU update to @dst.  Addresses come from @iph, else from
 * @sk, else the update is address-less.  The MTU is clamped to at least
 * IPV6_MIN_MTU and ignored if not smaller than the current one.  The
 * route is either updated in place, or - when it is a shared fib entry -
 * a per-destination RTF_CACHE exception clone is created instead.
 */
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
				 const struct ipv6hdr *iph, u32 mtu)
{
	const struct in6_addr *daddr, *saddr;
	struct rt6_info *rt6 = (struct rt6_info *)dst;

	/* admin pinned the MTU; nothing to learn */
	if (dst_metric_locked(dst, RTAX_MTU))
		return;

	if (iph) {
		daddr = &iph->daddr;
		saddr = &iph->saddr;
	} else if (sk) {
		daddr = &sk->sk_v6_daddr;
		saddr = &inet6_sk(sk)->saddr;
	} else {
		daddr = NULL;
		saddr = NULL;
	}
	dst_confirm_neigh(dst, daddr);
	mtu = max_t(u32, mtu, IPV6_MIN_MTU);
	if (mtu >= dst_mtu(dst))
		return;

	if (!rt6_cache_allowed_for_pmtu(rt6)) {
		rt6_do_update_pmtu(rt6, mtu);
		/* update rt6_ex->stamp for cache */
		if (rt6->rt6i_flags & RTF_CACHE)
			rt6_update_exception_stamp_rt(rt6);
	} else if (daddr) {
		struct fib6_result res = {};
		struct rt6_info *nrt6;

		rcu_read_lock();
		res.f6i = rcu_dereference(rt6->from);
		if (!res.f6i) {
			rcu_read_unlock();
			return;
		}
		res.fib6_flags = res.f6i->fib6_flags;
		res.fib6_type = res.f6i->fib6_type;

		if (res.f6i->nh) {
			/* Shared nexthop object: find the fib6_nh that
			 * matches this dst's device and gateway.
			 */
			struct fib6_nh_match_arg arg = {
				.dev = dst->dev,
				.gw = &rt6->rt6i_gateway,
			};

			nexthop_for_each_fib6_nh(res.f6i->nh,
						 fib6_nh_find_match, &arg);

			/* fib6_info uses a nexthop that does not have fib6_nh
			 * using the dst->dev + gw. Should be impossible.
			 */
			if (!arg.match) {
				rcu_read_unlock();
				return;
			}

			res.nh = arg.match;
		} else {
			res.nh = res.f6i->fib6_nh;
		}

		nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
		if (nrt6) {
			rt6_do_update_pmtu(nrt6, mtu);
			/* insertion failure: drop the clone immediately */
			if (rt6_insert_exception(nrt6, &res))
				dst_release_immediate(&nrt6->dst);
		}
		rcu_read_unlock();
	}
}

2767 2768 2769 2770 2771 2772
/* dst_ops->update_pmtu handler: forward to __ip6_rt_update_pmtu(),
 * extracting the IPv6 header from @skb when one is available.
 */
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu)
{
	const struct ipv6hdr *iph = skb ? ipv6_hdr(skb) : NULL;

	__ip6_rt_update_pmtu(dst, sk, iph, mtu);
}

2773
/* Learn a new path MTU for the flow described by the (failed) packet in
 * @skb: rebuild its flow key, look up the matching output route and
 * apply the update there.  @mtu is in network byte order.
 */
void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
		     int oif, u32 mark, kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_uid = uid,
	};
	struct dst_entry *dst;

	dst = ip6_route_output(net, NULL, &fl6);
	if (!dst->error)
		__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_update_pmtu);

/* Socket-aware PMTU update: apply the update for the socket's flow,
 * then - if the socket's cached dst is now invalid - refresh the
 * datagram socket's route under the socket lock.
 */
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
	int oif = sk->sk_bound_dev_if;
	struct dst_entry *dst;

	/* unbound sockets on an enslaved device route via the L3 master */
	if (!oif && skb->dev)
		oif = l3mdev_master_ifindex(skb->dev);

	ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);

	dst = __sk_dst_get(sk);
	if (!dst || !dst->obsolete ||
	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
		return;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		ip6_datagram_dst_update(sk, false);
	bh_unlock_sock(sk);
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);

2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832
/* Cache @dst on @sk, passing the destination (and, with subtrees, the
 * source) address only when it matches the socket's own address - a
 * mismatch means the dst is for a different flow and must be
 * revalidated on each use.
 */
void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
			   const struct flowi6 *fl6)
{
#ifdef CONFIG_IPV6_SUBTREES
	struct ipv6_pinfo *np = inet6_sk(sk);
#endif

	ip6_dst_store(sk, dst,
		      ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
		      &sk->sk_v6_daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
		      ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
		      &np->saddr :
#endif
		      NULL);
}

2833
/* Check whether nexthop @res->nh can be the sender of a redirect from
 * gateway @gw for flow @fl6.  On success returns true; when the match
 * was found in the exception cache, *@ret is also set to that cached
 * route.  Called under RCU (rt6_find_cached_rt requirement).
 */
static bool ip6_redirect_nh_match(const struct fib6_result *res,
				  struct flowi6 *fl6,
				  const struct in6_addr *gw,
				  struct rt6_info **ret)
{
	const struct fib6_nh *nh = res->nh;

	/* Dead, gateway-less, or wrong-interface nexthops cannot have
	 * sent this redirect.
	 */
	if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family ||
	    fl6->flowi6_oif != nh->fib_nh_dev->ifindex)
		return false;

	/* rt_cache's gateway might be different from its 'parent'
	 * in the case of an ip redirect.
	 * So we keep searching in the exception table if the gateway
	 * is different.
	 */
	if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) {
		struct rt6_info *rt_cache;

		rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr);
		if (rt_cache &&
		    ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) {
			*ret = rt_cache;
			return true;
		}
		return false;
	}
	return true;
}

2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877
/* Context passed through nexthop_for_each_fib6_nh() while searching
 * for the fib6_nh a redirect came from.
 */
struct fib6_nh_rd_arg {
	struct fib6_result	*res;
	struct flowi6		*fl6;
	const struct in6_addr	*gw;
	struct rt6_info		**ret;
};

/* Per-fib6_nh callback: records @nh in the result and returns non-zero
 * (stopping the walk) when it matches the redirecting gateway.
 */
static int fib6_nh_redirect_match(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_rd_arg *ctx = _arg;

	ctx->res->nh = nh;
	return ip6_redirect_nh_match(ctx->res, ctx->fl6, ctx->gw, ctx->ret);
}

2878 2879 2880 2881 2882 2883 2884 2885 2886
/* Handle redirects */
/* Flow wrapper that smuggles the redirecting gateway address through
 * the fib6_rule_lookup() callback interface.
 */
struct ip6rd_flowi {
	struct flowi6 fl6;
	struct in6_addr gateway;
};

/* Find the route a redirect applies to: walk the fib6 node for the
 * destination looking for a nexthop whose gateway matches the redirect
 * sender.  Returns a referenced rt6_info (possibly the null entry).
 */
static struct rt6_info *__ip6_route_redirect(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
	struct rt6_info *ret = NULL;
	struct fib6_result res = {};
	struct fib6_nh_rd_arg arg = {
		.res = &res,
		.fl6 = fl6,
		.gw  = &rdfl->gateway,
		.ret = &ret
	};
	struct fib6_info *rt;
	struct fib6_node *fn;

	/* l3mdev_update_flow overrides oif if the device is enslaved; in
	 * this case we must match on the real ingress device, so reset it
	 */
	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		fl6->flowi6_oif = skb->dev->ifindex;

	/* Get the "current" route for this destination and
	 * check if the redirect has come from appropriate router.
	 *
	 * RFC 4861 specifies that redirects should only be
	 * accepted if they come from the nexthop to the target.
	 * Due to the way the routes are chosen, this notion
	 * is a bit fuzzy and one might need to check all possible
	 * routes.
	 */

	rcu_read_lock();
	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	for_each_fib6_node_rt_rcu(fn) {
		res.f6i = rt;
		if (fib6_check_expired(rt))
			continue;
		if (rt->fib6_flags & RTF_REJECT)
			break;
		if (unlikely(rt->nh)) {
			if (nexthop_is_blackhole(rt->nh))
				continue;
			/* on match, res->nh is filled in and potentially ret */
			if (nexthop_for_each_fib6_nh(rt->nh,
						     fib6_nh_redirect_match,
						     &arg))
				goto out;
		} else {
			res.nh = rt->fib6_nh;
			if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway,
						  &ret))
				goto out;
		}
	}

	if (!rt)
		rt = net->ipv6.fib6_null_entry;
	else if (rt->fib6_flags & RTF_REJECT) {
		ret = net->ipv6.ip6_null_entry;
		goto out;
	}

	/* nothing matched at this node: climb the trie and retry */
	if (rt == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}

	res.f6i = rt;
	res.nh = rt->fib6_nh;
out:
	if (ret) {
		ip6_hold_safe(net, &ret);
	} else {
		res.fib6_flags = res.f6i->fib6_flags;
		res.fib6_type = res.f6i->fib6_type;
		ret = ip6_create_rt_rcu(&res);
	}

	rcu_read_unlock();

	trace_fib6_table_lookup(net, &res, table, fl6);
	return ret;
};

static struct dst_entry *ip6_route_redirect(struct net *net,
D
David Ahern 已提交
2974 2975 2976
					    const struct flowi6 *fl6,
					    const struct sk_buff *skb,
					    const struct in6_addr *gateway)
2977 2978 2979 2980 2981 2982 2983
{
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip6rd_flowi rdfl;

	rdfl.fl6 = *fl6;
	rdfl.gateway = *gateway;

D
David Ahern 已提交
2984
	return fib6_rule_lookup(net, &rdfl.fl6, skb,
2985 2986 2987
				flags, __ip6_route_redirect);
}

2988 2989
/* Process an ICMPv6 redirect for the packet in @skb: rebuild its flow,
 * locate the affected route and let rt6_do_redirect() install the new
 * gateway.
 */
void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
		  kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct flowi6 fl6 = {
		.flowi6_iif = LOOPBACK_IFINDEX,
		.flowi6_oif = oif,
		.flowi6_mark = mark,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_uid = uid,
	};
	struct dst_entry *dst;

	dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_redirect);

3009
void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
3010 3011 3012 3013
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
	struct dst_entry *dst;
3014 3015 3016 3017 3018 3019 3020
	struct flowi6 fl6 = {
		.flowi6_iif = LOOPBACK_IFINDEX,
		.flowi6_oif = oif,
		.daddr = msg->dest,
		.saddr = iph->daddr,
		.flowi6_uid = sock_net_uid(net, NULL),
	};
3021

D
David Ahern 已提交
3022
	dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
3023
	rt6_do_redirect(dst, NULL, skb);
3024 3025 3026
	dst_release(dst);
}

3027 3028
void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
3029 3030
	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
		     sk->sk_uid);
3031 3032 3033
}
EXPORT_SYMBOL_GPL(ip6_sk_redirect);

3034
static unsigned int ip6_default_advmss(const struct dst_entry *dst)
L
Linus Torvalds 已提交
3035
{
3036 3037 3038 3039
	struct net_device *dev = dst->dev;
	unsigned int mtu = dst_mtu(dst);
	struct net *net = dev_net(dev);

L
Linus Torvalds 已提交
3040 3041
	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);

3042 3043
	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
L
Linus Torvalds 已提交
3044 3045

	/*
3046 3047 3048
	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
	 * IPV6_MAXPLEN is also valid and means: "any MSS,
L
Linus Torvalds 已提交
3049 3050 3051 3052 3053 3054 3055
	 * rely only on pmtu discovery"
	 */
	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
		mtu = IPV6_MAXPLEN;
	return mtu;
}

3056
static unsigned int ip6_mtu(const struct dst_entry *dst)
3057 3058
{
	struct inet6_dev *idev;
3059
	unsigned int mtu;
3060 3061

	mtu = dst_metric_raw(dst, RTAX_MTU);
3062
	if (mtu)
E
Eric Dumazet 已提交
3063
		goto out;
3064 3065

	mtu = IPV6_MIN_MTU;
3066 3067 3068 3069 3070 3071 3072

	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

E
Eric Dumazet 已提交
3073
out:
3074 3075 3076
	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
3077 3078
}

3079 3080 3081 3082 3083 3084 3085 3086
/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 *
 * based on ip6_dst_mtu_forward and exception logic of
 * rt6_find_cached_rt; called with rcu_read_lock
 */
3087 3088 3089
u32 ip6_mtu_from_fib6(const struct fib6_result *res,
		      const struct in6_addr *daddr,
		      const struct in6_addr *saddr)
3090
{
3091 3092
	const struct fib6_nh *nh = res->nh;
	struct fib6_info *f6i = res->f6i;
3093
	struct inet6_dev *idev;
3094
	struct rt6_info *rt;
3095 3096 3097 3098 3099 3100 3101 3102
	u32 mtu = 0;

	if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
		mtu = f6i->fib6_pmtu;
		if (mtu)
			goto out;
	}

3103 3104 3105 3106
	rt = rt6_find_cached_rt(res, daddr, saddr);
	if (unlikely(rt)) {
		mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
	} else {
3107
		struct net_device *dev = nh->fib_nh_dev;
3108 3109 3110 3111 3112 3113 3114 3115 3116

		mtu = IPV6_MIN_MTU;
		idev = __in6_dev_get(dev);
		if (idev && idev->cnf.mtu6 > mtu)
			mtu = idev->cnf.mtu6;
	}

	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
out:
3117
	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
3118 3119
}

3120
struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
3121
				  struct flowi6 *fl6)
L
Linus Torvalds 已提交
3122
{
3123
	struct dst_entry *dst;
L
Linus Torvalds 已提交
3124 3125
	struct rt6_info *rt;
	struct inet6_dev *idev = in6_dev_get(dev);
3126
	struct net *net = dev_net(dev);
L
Linus Torvalds 已提交
3127

3128
	if (unlikely(!idev))
E
Eric Dumazet 已提交
3129
		return ERR_PTR(-ENODEV);
L
Linus Torvalds 已提交
3130

3131
	rt = ip6_dst_alloc(net, dev, 0);
3132
	if (unlikely(!rt)) {
L
Linus Torvalds 已提交
3133
		in6_dev_put(idev);
3134
		dst = ERR_PTR(-ENOMEM);
L
Linus Torvalds 已提交
3135 3136 3137
		goto out;
	}

3138
	rt->dst.flags |= DST_HOST;
3139
	rt->dst.input = ip6_input;
3140
	rt->dst.output  = ip6_output;
3141
	rt->rt6i_gateway  = fl6->daddr;
3142
	rt->rt6i_dst.addr = fl6->daddr;
3143 3144
	rt->rt6i_dst.plen = 128;
	rt->rt6i_idev     = idev;
L
Li RongQing 已提交
3145
	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
L
Linus Torvalds 已提交
3146

3147
	/* Add this dst into uncached_list so that rt6_disable_ip() can
3148 3149 3150
	 * do proper release of the net_device
	 */
	rt6_uncached_list_add(rt);
W
Wei Wang 已提交
3151
	atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
L
Linus Torvalds 已提交
3152

3153 3154
	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);

L
Linus Torvalds 已提交
3155
out:
3156
	return dst;
L
Linus Torvalds 已提交
3157 3158
}

3159
static int ip6_dst_gc(struct dst_ops *ops)
L
Linus Torvalds 已提交
3160
{
3161
	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
3162 3163 3164 3165 3166
	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
3167
	int entries;
3168

3169
	entries = dst_entries_get_fast(ops);
3170
	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
3171
	    entries <= rt_max_size)
L
Linus Torvalds 已提交
3172 3173
		goto out;

3174
	net->ipv6.ip6_rt_gc_expire++;
3175
	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
3176 3177
	entries = dst_entries_get_slow(ops);
	if (entries < ops->gc_thresh)
3178
		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
L
Linus Torvalds 已提交
3179
out:
3180
	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
3181
	return entries > rt_max_size;
L
Linus Torvalds 已提交
3182 3183
}

3184 3185 3186
static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg,
			       const struct in6_addr *gw_addr, u32 tbid,
			       int flags, struct fib6_result *res)
3187 3188 3189 3190 3191 3192 3193
{
	struct flowi6 fl6 = {
		.flowi6_oif = cfg->fc_ifindex,
		.daddr = *gw_addr,
		.saddr = cfg->fc_prefsrc,
	};
	struct fib6_table *table;
3194
	int err;
3195

3196
	table = fib6_get_table(net, tbid);
3197
	if (!table)
3198
		return -EINVAL;
3199 3200 3201 3202

	if (!ipv6_addr_any(&cfg->fc_prefsrc))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

3203
	flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
3204

3205 3206 3207 3208
	err = fib6_table_lookup(net, table, cfg->fc_ifindex, &fl6, res, flags);
	if (!err && res->f6i != net->ipv6.fib6_null_entry)
		fib6_select_path(net, res, &fl6, cfg->fc_ifindex,
				 cfg->fc_ifindex != 0, NULL, flags);
3209

3210
	return err;
3211 3212
}

3213 3214
static int ip6_route_check_nh_onlink(struct net *net,
				     struct fib6_config *cfg,
3215
				     const struct net_device *dev,
3216 3217
				     struct netlink_ext_ack *extack)
{
3218
	u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
3219
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
3220
	struct fib6_result res = {};
3221 3222
	int err;

3223 3224 3225 3226 3227 3228 3229 3230
	err = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0, &res);
	if (!err && !(res.fib6_flags & RTF_REJECT) &&
	    /* ignore match if it is the default route */
	    !ipv6_addr_any(&res.f6i->fib6_dst.addr) &&
	    (res.fib6_type != RTN_UNICAST || dev != res.nh->fib_nh_dev)) {
		NL_SET_ERR_MSG(extack,
			       "Nexthop has invalid gateway or device mismatch");
		err = -EINVAL;
3231 3232 3233 3234 3235
	}

	return err;
}

3236 3237 3238 3239 3240 3241 3242
static int ip6_route_check_nh(struct net *net,
			      struct fib6_config *cfg,
			      struct net_device **_dev,
			      struct inet6_dev **idev)
{
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	struct net_device *dev = _dev ? *_dev : NULL;
3243 3244
	int flags = RT6_LOOKUP_F_IFACE;
	struct fib6_result res = {};
3245 3246 3247
	int err = -EHOSTUNREACH;

	if (cfg->fc_table) {
3248 3249 3250 3251 3252 3253 3254 3255 3256
		err = ip6_nh_lookup_table(net, cfg, gw_addr,
					  cfg->fc_table, flags, &res);
		/* gw_addr can not require a gateway or resolve to a reject
		 * route. If a device is given, it must match the result.
		 */
		if (err || res.fib6_flags & RTF_REJECT ||
		    res.nh->fib_nh_gw_family ||
		    (dev && dev != res.nh->fib_nh_dev))
			err = -EHOSTUNREACH;
3257 3258
	}

3259 3260 3261 3262 3263
	if (err < 0) {
		struct flowi6 fl6 = {
			.flowi6_oif = cfg->fc_ifindex,
			.daddr = *gw_addr,
		};
3264

3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275
		err = fib6_lookup(net, cfg->fc_ifindex, &fl6, &res, flags);
		if (err || res.fib6_flags & RTF_REJECT ||
		    res.nh->fib_nh_gw_family)
			err = -EHOSTUNREACH;

		if (err)
			return err;

		fib6_select_path(net, &res, &fl6, cfg->fc_ifindex,
				 cfg->fc_ifindex != 0, NULL, flags);
	}
3276

3277
	err = 0;
3278
	if (dev) {
3279 3280
		if (dev != res.nh->fib_nh_dev)
			err = -EHOSTUNREACH;
3281
	} else {
3282
		*_dev = dev = res.nh->fib_nh_dev;
3283
		dev_hold(dev);
3284
		*idev = in6_dev_get(dev);
3285 3286 3287 3288 3289
	}

	return err;
}

3290 3291 3292 3293 3294 3295
static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
			   struct net_device **_dev, struct inet6_dev **idev,
			   struct netlink_ext_ack *extack)
{
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	int gwa_type = ipv6_addr_type(gw_addr);
3296
	bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
3297
	const struct net_device *dev = *_dev;
3298
	bool need_addr_check = !dev;
3299 3300 3301 3302 3303 3304 3305
	int err = -EINVAL;

	/* if gw_addr is local we will fail to detect this in case
	 * address is still TENTATIVE (DAD in progress). rt6_lookup()
	 * will return already-added prefix route via interface that
	 * prefix route was assigned to, which might be non-loopback.
	 */
3306 3307 3308
	if (dev &&
	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326
		goto out;
	}

	if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
		/* IPv6 strictly inhibits using not link-local
		 * addresses as nexthop address.
		 * Otherwise, router will not able to send redirects.
		 * It is very good, but in some (rare!) circumstances
		 * (SIT, PtP, NBMA NOARP links) it is handy to allow
		 * some exceptions. --ANK
		 * We allow IPv4-mapped nexthops to support RFC4798-type
		 * addressing
		 */
		if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
			NL_SET_ERR_MSG(extack, "Invalid gateway address");
			goto out;
		}

3327 3328
		rcu_read_lock();

3329 3330 3331 3332 3333
		if (cfg->fc_flags & RTNH_F_ONLINK)
			err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
		else
			err = ip6_route_check_nh(net, cfg, _dev, idev);

3334 3335
		rcu_read_unlock();

3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351
		if (err)
			goto out;
	}

	/* reload in case device was changed */
	dev = *_dev;

	err = -EINVAL;
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Egress device not specified");
		goto out;
	} else if (dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack,
			       "Egress device can not be loopback device for this route");
		goto out;
	}
3352 3353 3354 3355 3356 3357 3358 3359 3360 3361

	/* if we did not check gw_addr above, do so now that the
	 * egress device has been resolved.
	 */
	if (need_addr_check &&
	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
		goto out;
	}

3362 3363 3364 3365 3366
	err = 0;
out:
	return err;
}

3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386
static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
{
	if ((flags & RTF_REJECT) ||
	    (dev && (dev->flags & IFF_LOOPBACK) &&
	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
	     !(flags & RTF_LOCAL)))
		return true;

	return false;
}

/* Initialize a fib6_nh from a route config: resolve and validate the
 * egress device and gateway, allocate the per-cpu route cache and the
 * lwtunnel encap state.  On error all acquired references are dropped.
 */
int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
		 struct fib6_config *cfg, gfp_t gfp_flags,
		 struct netlink_ext_ack *extack)
{
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;
	int addr_type;
	int err;

	fib6_nh->fib_nh_family = AF_INET6;

	err = -ENODEV;
	if (cfg->fc_ifindex) {
		dev = dev_get_by_index(net, cfg->fc_ifindex);
		if (!dev)
			goto out;
		idev = in6_dev_get(dev);
		if (!idev)
			goto out;
	}

	if (cfg->fc_flags & RTNH_F_ONLINK) {
		if (!dev) {
			NL_SET_ERR_MSG(extack,
				       "Nexthop device required for onlink");
			goto out;
		}

		if (!(dev->flags & IFF_UP)) {
			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
			err = -ENETDOWN;
			goto out;
		}

		fib6_nh->fib_nh_flags |= RTNH_F_ONLINK;
	}

	fib6_nh->fib_nh_weight = 1;

	/* We cannot add true routes via loopback here,
	 * they would result in kernel looping; promote them to reject routes
	 */
	addr_type = ipv6_addr_type(&cfg->fc_dst);
	if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
		/* hold loopback dev/idev if we haven't done so. */
		if (dev != net->loopback_dev) {
			if (dev) {
				dev_put(dev);
				in6_dev_put(idev);
			}
			dev = net->loopback_dev;
			dev_hold(dev);
			idev = in6_dev_get(dev);
			if (!idev) {
				err = -ENODEV;
				goto out;
			}
		}
		goto pcpu_alloc;
	}

	if (cfg->fc_flags & RTF_GATEWAY) {
		err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
		if (err)
			goto out;

		fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
		fib6_nh->fib_nh_gw_family = AF_INET6;
	}

	err = -ENODEV;
	if (!dev)
		goto out;

	if (idev->cnf.disable_ipv6) {
		NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
		err = -EACCES;
		goto out;
	}

	if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) {
		NL_SET_ERR_MSG(extack, "Nexthop device is not up");
		err = -ENETDOWN;
		goto out;
	}

	if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
	    !netif_carrier_ok(dev))
		fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;

	err = fib_nh_common_init(&fib6_nh->nh_common, cfg->fc_encap,
				 cfg->fc_encap_type, cfg, gfp_flags, extack);
	if (err)
		goto out;

pcpu_alloc:
	fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags);
	if (!fib6_nh->rt6i_pcpu) {
		err = -ENOMEM;
		goto out;
	}

	fib6_nh->fib_nh_dev = dev;
	fib6_nh->fib_nh_oif = dev->ifindex;
	err = 0;
out:
	if (idev)
		in6_dev_put(idev);

	if (err) {
		lwtstate_put(fib6_nh->fib_nh_lws);
		fib6_nh->fib_nh_lws = NULL;
		if (dev)
			dev_put(dev);
	}

	return err;
}

3497 3498
void fib6_nh_release(struct fib6_nh *fib6_nh)
{
3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511
	struct rt6_exception_bucket *bucket;

	rcu_read_lock();

	fib6_nh_flush_exceptions(fib6_nh, NULL);
	bucket = fib6_nh_get_excptn_bucket(fib6_nh, NULL);
	if (bucket) {
		rcu_assign_pointer(fib6_nh->rt6i_exception_bucket, NULL);
		kfree(bucket);
	}

	rcu_read_unlock();

3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530
	if (fib6_nh->rt6i_pcpu) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct rt6_info **ppcpu_rt;
			struct rt6_info *pcpu_rt;

			ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
			pcpu_rt = *ppcpu_rt;
			if (pcpu_rt) {
				dst_dev_put(&pcpu_rt->dst);
				dst_release(&pcpu_rt->dst);
				*ppcpu_rt = NULL;
			}
		}

		free_percpu(fib6_nh->rt6i_pcpu);
	}

3531
	fib_nh_common_release(&fib6_nh->nh_common);
3532 3533
}

3534
static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
3535
					      gfp_t gfp_flags,
3536
					      struct netlink_ext_ack *extack)
L
Linus Torvalds 已提交
3537
{
3538
	struct net *net = cfg->fc_nlinfo.nl_net;
3539
	struct fib6_info *rt = NULL;
3540
	struct nexthop *nh = NULL;
T
Thomas Graf 已提交
3541
	struct fib6_table *table;
3542
	struct fib6_nh *fib6_nh;
3543
	int err = -EINVAL;
3544
	int addr_type;
L
Linus Torvalds 已提交
3545

3546
	/* RTF_PCPU is an internal flag; can not be set by userspace */
3547 3548
	if (cfg->fc_flags & RTF_PCPU) {
		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
3549
		goto out;
3550
	}
3551

3552 3553 3554 3555 3556 3557
	/* RTF_CACHE is an internal flag; can not be set by userspace */
	if (cfg->fc_flags & RTF_CACHE) {
		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
		goto out;
	}

3558 3559 3560 3561 3562
	if (cfg->fc_type > RTN_MAX) {
		NL_SET_ERR_MSG(extack, "Invalid route type");
		goto out;
	}

3563 3564 3565 3566 3567 3568
	if (cfg->fc_dst_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid prefix length");
		goto out;
	}
	if (cfg->fc_src_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid source address length");
3569
		goto out;
3570
	}
L
Linus Torvalds 已提交
3571
#ifndef CONFIG_IPV6_SUBTREES
3572 3573 3574
	if (cfg->fc_src_len) {
		NL_SET_ERR_MSG(extack,
			       "Specifying source address requires IPV6_SUBTREES to be enabled");
3575
		goto out;
3576
	}
L
Linus Torvalds 已提交
3577
#endif
3578 3579 3580 3581 3582 3583 3584 3585 3586 3587
	if (cfg->fc_nh_id) {
		nh = nexthop_find_by_id(net, cfg->fc_nh_id);
		if (!nh) {
			NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
			goto out;
		}
		err = fib6_check_nexthop(nh, cfg, extack);
		if (err)
			goto out;
	}
3588

3589
	err = -ENOBUFS;
3590 3591
	if (cfg->fc_nlinfo.nlh &&
	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
3592
		table = fib6_get_table(net, cfg->fc_table);
3593
		if (!table) {
3594
			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
3595 3596 3597 3598 3599
			table = fib6_new_table(net, cfg->fc_table);
		}
	} else {
		table = fib6_new_table(net, cfg->fc_table);
	}
3600 3601

	if (!table)
T
Thomas Graf 已提交
3602 3603
		goto out;

3604
	err = -ENOMEM;
3605
	rt = fib6_info_alloc(gfp_flags, !nh);
3606
	if (!rt)
L
Linus Torvalds 已提交
3607
		goto out;
3608

3609 3610
	rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
					       extack);
3611 3612
	if (IS_ERR(rt->fib6_metrics)) {
		err = PTR_ERR(rt->fib6_metrics);
3613 3614
		/* Do not leave garbage there. */
		rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
3615 3616 3617
		goto out;
	}

3618 3619
	if (cfg->fc_flags & RTF_ADDRCONF)
		rt->dst_nocount = true;
L
Linus Torvalds 已提交
3620

3621
	if (cfg->fc_flags & RTF_EXPIRES)
3622
		fib6_set_expires(rt, jiffies +
3623 3624
				clock_t_to_jiffies(cfg->fc_expires));
	else
3625
		fib6_clean_expires(rt);
L
Linus Torvalds 已提交
3626

3627 3628
	if (cfg->fc_protocol == RTPROT_UNSPEC)
		cfg->fc_protocol = RTPROT_BOOT;
3629
	rt->fib6_protocol = cfg->fc_protocol;
3630

3631 3632
	rt->fib6_table = table;
	rt->fib6_metric = cfg->fc_metric;
3633
	rt->fib6_type = cfg->fc_type ? : RTN_UNICAST;
3634
	rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY;
3635

3636 3637 3638
	ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
	rt->fib6_dst.plen = cfg->fc_dst_len;
	if (rt->fib6_dst.plen == 128)
3639
		rt->dst_host = true;
3640

L
Linus Torvalds 已提交
3641
#ifdef CONFIG_IPV6_SUBTREES
3642 3643
	ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
	rt->fib6_src.plen = cfg->fc_src_len;
L
Linus Torvalds 已提交
3644
#endif
3645 3646 3647 3648 3649 3650
	if (nh) {
		if (!nexthop_get(nh)) {
			NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
			goto out;
		}
		if (rt->fib6_src.plen) {
3651
			NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
3652 3653 3654 3655 3656 3657 3658 3659
			goto out;
		}
		rt->nh = nh;
		fib6_nh = nexthop_fib6_nh(rt->nh);
	} else {
		err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack);
		if (err)
			goto out;
L
Linus Torvalds 已提交
3660

3661 3662 3663 3664 3665 3666 3667 3668 3669 3670
		fib6_nh = rt->fib6_nh;

		/* We cannot add true routes via loopback here, they would
		 * result in kernel looping; promote them to reject routes
		 */
		addr_type = ipv6_addr_type(&cfg->fc_dst);
		if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->fib_nh_dev,
				   addr_type))
			rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
	}
3671

3672
	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
3673
		struct net_device *dev = fib6_nh->fib_nh_dev;
3674

3675
		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
3676
			NL_SET_ERR_MSG(extack, "Invalid source address");
3677 3678 3679
			err = -EINVAL;
			goto out;
		}
3680 3681
		rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
		rt->fib6_prefsrc.plen = 128;
3682
	} else
3683
		rt->fib6_prefsrc.plen = 0;
3684

3685
	return rt;
3686
out:
3687
	fib6_info_release(rt);
3688
	return ERR_PTR(err);
3689 3690
}

3691
int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
3692
		  struct netlink_ext_ack *extack)
3693
{
3694
	struct fib6_info *rt;
3695 3696
	int err;

3697
	rt = ip6_route_info_create(cfg, gfp_flags, extack);
3698 3699
	if (IS_ERR(rt))
		return PTR_ERR(rt);
3700

3701
	err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
3702
	fib6_info_release(rt);
3703

L
Linus Torvalds 已提交
3704 3705 3706
	return err;
}

3707
static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
L
Linus Torvalds 已提交
3708
{
3709
	struct net *net = info->nl_net;
T
Thomas Graf 已提交
3710
	struct fib6_table *table;
3711
	int err;
L
Linus Torvalds 已提交
3712

D
David Ahern 已提交
3713
	if (rt == net->ipv6.fib6_null_entry) {
3714 3715 3716
		err = -ENOENT;
		goto out;
	}
3717

3718
	table = rt->fib6_table;
3719
	spin_lock_bh(&table->tb6_lock);
3720
	err = fib6_del(rt, info);
3721
	spin_unlock_bh(&table->tb6_lock);
L
Linus Torvalds 已提交
3722

3723
out:
3724
	fib6_info_release(rt);
L
Linus Torvalds 已提交
3725 3726 3727
	return err;
}

3728
int ip6_del_rt(struct net *net, struct fib6_info *rt)
3729
{
3730 3731
	struct nl_info info = { .nl_net = net };

3732
	return __ip6_del_rt(rt, &info);
3733 3734
}

3735
static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
3736 3737
{
	struct nl_info *info = &cfg->fc_nlinfo;
3738
	struct net *net = info->nl_net;
3739
	struct sk_buff *skb = NULL;
3740
	struct fib6_table *table;
3741
	int err = -ENOENT;
3742

D
David Ahern 已提交
3743
	if (rt == net->ipv6.fib6_null_entry)
3744
		goto out_put;
3745
	table = rt->fib6_table;
3746
	spin_lock_bh(&table->tb6_lock);
3747

3748
	if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
3749
		struct fib6_info *sibling, *next_sibling;
3750

3751 3752 3753 3754 3755
		/* prefer to send a single notification with all hops */
		skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
		if (skb) {
			u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;

3756
			if (rt6_fill_node(net, skb, rt, NULL,
3757 3758 3759 3760 3761 3762 3763 3764
					  NULL, NULL, 0, RTM_DELROUTE,
					  info->portid, seq, 0) < 0) {
				kfree_skb(skb);
				skb = NULL;
			} else
				info->skip_notify = 1;
		}

3765 3766 3767 3768 3769 3770
		info->skip_notify_kernel = 1;
		call_fib6_multipath_entry_notifiers(net,
						    FIB_EVENT_ENTRY_DEL,
						    rt,
						    rt->fib6_nsiblings,
						    NULL);
3771
		list_for_each_entry_safe(sibling, next_sibling,
3772 3773
					 &rt->fib6_siblings,
					 fib6_siblings) {
3774 3775
			err = fib6_del(sibling, info);
			if (err)
3776
				goto out_unlock;
3777 3778 3779 3780
		}
	}

	err = fib6_del(rt, info);
3781
out_unlock:
3782
	spin_unlock_bh(&table->tb6_lock);
3783
out_put:
3784
	fib6_info_release(rt);
3785 3786

	if (skb) {
3787
		rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3788 3789
			    info->nlh, gfp_any());
	}
3790 3791 3792
	return err;
}

3793
static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
3794 3795 3796 3797 3798 3799 3800 3801 3802
{
	int rc = -ESRCH;

	if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
		goto out;

	if (cfg->fc_flags & RTF_GATEWAY &&
	    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
		goto out;
3803 3804

	rc = rt6_remove_exception_rt(rt);
3805 3806 3807 3808
out:
	return rc;
}

3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824
static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt,
			     struct fib6_nh *nh)
{
	struct fib6_result res = {
		.f6i = rt,
		.nh = nh,
	};
	struct rt6_info *rt_cache;

	rt_cache = rt6_find_cached_rt(&res, &cfg->fc_dst, &cfg->fc_src);
	if (rt_cache)
		return __ip6_del_cached_rt(rt_cache, cfg);

	return 0;
}

/* Argument bundle for iterating a nexthop object's fib6_nh entries. */
struct fib6_nh_del_cached_rt_arg {
	struct fib6_config *cfg;
	struct fib6_info *f6i;
};

/* Per-nexthop callback: -ESRCH (no match) is not an error here. */
static int fib6_nh_del_cached_rt(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_del_cached_rt_arg *arg = _arg;
	int rc;

	rc = ip6_del_cached_rt(arg->cfg, arg->f6i, nh);
	return rc != -ESRCH ? rc : 0;
}

/* Delete the matching cached route across every fib6_nh of the route's
 * nexthop object.
 */
static int ip6_del_cached_rt_nh(struct fib6_config *cfg, struct fib6_info *f6i)
{
	struct fib6_nh_del_cached_rt_arg arg = {
		.cfg = cfg,
		.f6i = f6i
	};

	return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_del_cached_rt, &arg);
}

3849 3850
static int ip6_route_del(struct fib6_config *cfg,
			 struct netlink_ext_ack *extack)
L
Linus Torvalds 已提交
3851
{
T
Thomas Graf 已提交
3852
	struct fib6_table *table;
3853
	struct fib6_info *rt;
L
Linus Torvalds 已提交
3854 3855 3856
	struct fib6_node *fn;
	int err = -ESRCH;

3857
	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
3858 3859
	if (!table) {
		NL_SET_ERR_MSG(extack, "FIB table does not exist");
T
Thomas Graf 已提交
3860
		return err;
3861
	}
T
Thomas Graf 已提交
3862

3863
	rcu_read_lock();
L
Linus Torvalds 已提交
3864

T
Thomas Graf 已提交
3865
	fn = fib6_locate(&table->tb6_root,
3866
			 &cfg->fc_dst, cfg->fc_dst_len,
3867
			 &cfg->fc_src, cfg->fc_src_len,
3868
			 !(cfg->fc_flags & RTF_CACHE));
3869

L
Linus Torvalds 已提交
3870
	if (fn) {
3871
		for_each_fib6_node_rt_rcu(fn) {
D
David Ahern 已提交
3872 3873
			struct fib6_nh *nh;

3874 3875
			if (rt->nh && cfg->fc_nh_id &&
			    rt->nh->id != cfg->fc_nh_id)
3876
				continue;
3877

3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888
			if (cfg->fc_flags & RTF_CACHE) {
				int rc = 0;

				if (rt->nh) {
					rc = ip6_del_cached_rt_nh(cfg, rt);
				} else if (cfg->fc_nh_id) {
					continue;
				} else {
					nh = rt->fib6_nh;
					rc = ip6_del_cached_rt(cfg, rt, nh);
				}
3889 3890 3891
				if (rc != -ESRCH) {
					rcu_read_unlock();
					return rc;
3892 3893
				}
				continue;
3894
			}
D
David Ahern 已提交
3895

3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912
			if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
				continue;
			if (cfg->fc_protocol &&
			    cfg->fc_protocol != rt->fib6_protocol)
				continue;

			if (rt->nh) {
				if (!fib6_info_hold_safe(rt))
					continue;
				rcu_read_unlock();

				return __ip6_del_rt(rt, &cfg->fc_nlinfo);
			}
			if (cfg->fc_nh_id)
				continue;

			nh = rt->fib6_nh;
3913
			if (cfg->fc_ifindex &&
D
David Ahern 已提交
3914 3915
			    (!nh->fib_nh_dev ||
			     nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
L
Linus Torvalds 已提交
3916
				continue;
3917
			if (cfg->fc_flags & RTF_GATEWAY &&
D
David Ahern 已提交
3918
			    !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
L
Linus Torvalds 已提交
3919
				continue;
3920 3921
			if (!fib6_info_hold_safe(rt))
				continue;
3922
			rcu_read_unlock();
L
Linus Torvalds 已提交
3923

3924 3925 3926 3927 3928
			/* if gateway was specified only delete the one hop */
			if (cfg->fc_flags & RTF_GATEWAY)
				return __ip6_del_rt(rt, &cfg->fc_nlinfo);

			return __ip6_del_rt_siblings(rt, cfg);
L
Linus Torvalds 已提交
3929 3930
		}
	}
3931
	rcu_read_unlock();
L
Linus Torvalds 已提交
3932 3933 3934 3935

	return err;
}

3936
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
3937 3938
{
	struct netevent_redirect netevent;
3939
	struct rt6_info *rt, *nrt = NULL;
3940
	struct fib6_result res = {};
3941 3942 3943
	struct ndisc_options ndopts;
	struct inet6_dev *in6_dev;
	struct neighbour *neigh;
3944
	struct rd_msg *msg;
3945 3946
	int optlen, on_link;
	u8 *lladdr;
3947

3948
	optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
3949
	optlen -= sizeof(*msg);
3950 3951

	if (optlen < 0) {
3952
		net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
3953 3954 3955
		return;
	}

3956
	msg = (struct rd_msg *)icmp6_hdr(skb);
3957

3958
	if (ipv6_addr_is_multicast(&msg->dest)) {
3959
		net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
3960 3961 3962
		return;
	}

3963
	on_link = 0;
3964
	if (ipv6_addr_equal(&msg->dest, &msg->target)) {
3965
		on_link = 1;
3966
	} else if (ipv6_addr_type(&msg->target) !=
3967
		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
3968
		net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982
		return;
	}

	in6_dev = __in6_dev_get(skb->dev);
	if (!in6_dev)
		return;
	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
		return;

	/* RFC2461 8.1:
	 *	The IP source address of the Redirect MUST be the same as the current
	 *	first-hop router for the specified ICMP Destination Address.
	 */

3983
	if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
3984 3985 3986
		net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
		return;
	}
3987 3988

	lladdr = NULL;
3989 3990 3991 3992 3993 3994 3995 3996 3997
	if (ndopts.nd_opts_tgt_lladdr) {
		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
					     skb->dev);
		if (!lladdr) {
			net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
			return;
		}
	}

3998
	rt = (struct rt6_info *) dst;
3999
	if (rt->rt6i_flags & RTF_REJECT) {
4000
		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
4001
		return;
4002
	}
4003

4004 4005 4006 4007
	/* Redirect received -> path was valid.
	 * Look, redirects are sent only in response to data packets,
	 * so that this nexthop apparently is reachable. --ANK
	 */
4008
	dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
4009

4010
	neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
4011 4012
	if (!neigh)
		return;
4013

L
Linus Torvalds 已提交
4014 4015 4016 4017
	/*
	 *	We have finally decided to accept it.
	 */

4018
	ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
L
Linus Torvalds 已提交
4019 4020 4021
		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
		     NEIGH_UPDATE_F_OVERRIDE|
		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
4022 4023
				     NEIGH_UPDATE_F_ISROUTER)),
		     NDISC_REDIRECT, &ndopts);
L
Linus Torvalds 已提交
4024

4025
	rcu_read_lock();
4026
	res.f6i = rcu_dereference(rt->from);
4027
	if (!res.f6i)
4028
		goto out;
4029

4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048
	if (res.f6i->nh) {
		struct fib6_nh_match_arg arg = {
			.dev = dst->dev,
			.gw = &rt->rt6i_gateway,
		};

		nexthop_for_each_fib6_nh(res.f6i->nh,
					 fib6_nh_find_match, &arg);

		/* fib6_info uses a nexthop that does not have fib6_nh
		 * using the dst->dev. Should be impossible
		 */
		if (!arg.match)
			goto out;
		res.nh = arg.match;
	} else {
		res.nh = res.f6i->fib6_nh;
	}

4049 4050
	res.fib6_flags = res.f6i->fib6_flags;
	res.fib6_type = res.f6i->fib6_type;
4051
	nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
4052
	if (!nrt)
L
Linus Torvalds 已提交
4053 4054 4055 4056 4057 4058
		goto out;

	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
	if (on_link)
		nrt->rt6i_flags &= ~RTF_GATEWAY;

A
Alexey Dobriyan 已提交
4059
	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
L
Linus Torvalds 已提交
4060

4061
	/* rt6_insert_exception() will take care of duplicated exceptions */
4062
	if (rt6_insert_exception(nrt, &res)) {
4063 4064 4065
		dst_release_immediate(&nrt->dst);
		goto out;
	}
L
Linus Torvalds 已提交
4066

4067 4068
	netevent.old = &rt->dst;
	netevent.new = &nrt->dst;
4069
	netevent.daddr = &msg->dest;
4070
	netevent.neigh = neigh;
4071 4072
	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);

L
Linus Torvalds 已提交
4073
out:
4074
	rcu_read_unlock();
4075
	neigh_release(neigh);
4076 4077
}

#ifdef CONFIG_IPV6_ROUTE_INFO
/* Find an RA-learned (RTF_ROUTEINFO) route for prefix/gateway on the
 * given device.  On success a reference is taken on the returned
 * fib6_info; the caller must release it.
 */
static struct fib6_info *rt6_get_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev)
{
	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
	int ifindex = dev->ifindex;
	struct fib6_node *fn;
	struct fib6_info *rt = NULL;
	struct fib6_table *table;

	table = fib6_get_table(net, tb_id);
	if (!table)
		return NULL;

	rcu_read_lock();
	fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
	if (!fn)
		goto out;

	for_each_fib6_node_rt_rcu(fn) {
		/* these routes do not use nexthops */
		if (rt->nh)
			continue;
		if (rt->fib6_nh->fib_nh_dev->ifindex != ifindex)
			continue;
		if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
		    !rt->fib6_nh->fib_nh_gw_family)
			continue;
		if (!ipv6_addr_equal(&rt->fib6_nh->fib_nh_gw6, gwaddr))
			continue;
		if (!fib6_info_hold_safe(rt))
			continue;
		break;
	}
out:
	rcu_read_unlock();
	return rt;
}

4119
static struct fib6_info *rt6_add_route_info(struct net *net,
4120
					   const struct in6_addr *prefix, int prefixlen,
4121 4122
					   const struct in6_addr *gwaddr,
					   struct net_device *dev,
4123
					   unsigned int pref)
4124
{
4125
	struct fib6_config cfg = {
4126
		.fc_metric	= IP6_RT_PRIO_USER,
4127
		.fc_ifindex	= dev->ifindex,
4128 4129 4130
		.fc_dst_len	= prefixlen,
		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
				  RTF_UP | RTF_PREF(pref),
4131
		.fc_protocol = RTPROT_RA,
4132
		.fc_type = RTN_UNICAST,
4133
		.fc_nlinfo.portid = 0,
4134 4135
		.fc_nlinfo.nlh = NULL,
		.fc_nlinfo.nl_net = net,
4136 4137
	};

4138
	cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
A
Alexey Dobriyan 已提交
4139 4140
	cfg.fc_dst = *prefix;
	cfg.fc_gateway = *gwaddr;
4141

4142 4143
	/* We should treat it as a default route if prefix length is 0. */
	if (!prefixlen)
4144
		cfg.fc_flags |= RTF_DEFAULT;
4145

4146
	ip6_route_add(&cfg, GFP_ATOMIC, NULL);
4147

4148
	return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
4149 4150 4151
}
#endif

4152
/* Find the RA-learned default router entry whose gateway is @addr and
 * whose nexthop device is @dev.
 *
 * Walks the default-route table (or the device's l3mdev table) under
 * RCU.  Returns the entry with a reference held, or NULL if absent or
 * if the reference could not be taken (entry being freed).
 */
struct fib6_info *rt6_get_dflt_router(struct net *net,
				     const struct in6_addr *addr,
				     struct net_device *dev)
{
	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
	struct fib6_info *rt;
	struct fib6_table *table;

	table = fib6_get_table(net, tb_id);
	if (!table)
		return NULL;

	rcu_read_lock();
	for_each_fib6_node_rt_rcu(&table->tb6_root) {
		struct fib6_nh *nh;

		/* RA routes do not use nexthops */
		if (rt->nh)
			continue;

		nh = rt->fib6_nh;
		/* must be an addrconf-created default route via this
		 * device with the requested gateway
		 */
		if (dev == nh->fib_nh_dev &&
		    ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
		    ipv6_addr_equal(&nh->fib_nh_gw6, addr))
			break;
	}
	/* loop leaves rt == NULL when the list is exhausted */
	if (rt && !fib6_info_hold_safe(rt))
		rt = NULL;
	rcu_read_unlock();
	return rt;
}

4184
/* Install a default router learned from a Router Advertisement.
 *
 * Adds an expiring RTPROT_RA default route via @gwaddr/@dev with router
 * preference @pref.  On successful insertion the owning table is marked
 * RT6_TABLE_HAS_DFLT_ROUTER so rt6_purge_dflt_routers() can skip tables
 * that never held one.  Returns the entry via rt6_get_dflt_router()
 * (reference held) or NULL.
 */
struct fib6_info *rt6_add_dflt_router(struct net *net,
				     const struct in6_addr *gwaddr,
				     struct net_device *dev,
				     unsigned int pref)
{
	struct fib6_config cfg = {
		.fc_table	= l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
		.fc_metric	= IP6_RT_PRIO_USER,
		.fc_ifindex	= dev->ifindex,
		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
				  RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
		.fc_protocol = RTPROT_RA,
		.fc_type = RTN_UNICAST,
		.fc_nlinfo.portid = 0,
		.fc_nlinfo.nlh = NULL,
		.fc_nlinfo.nl_net = net,
	};

	cfg.fc_gateway = *gwaddr;

	/* GFP_ATOMIC: called from RA processing in softirq context */
	if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
		struct fib6_table *table;

		table = fib6_get_table(dev_net(dev), cfg.fc_table);
		if (table)
			table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
	}

	return rt6_get_dflt_router(net, gwaddr, dev);
}

4215 4216
/* Remove all RA-learned (addrconf/default) routes from @table.
 *
 * Entries on interfaces with accept_ra == 2 ("accept RA even when
 * forwarding") are kept.  Deletion cannot happen under RCU, so for each
 * victim we take a reference, drop the RCU lock, delete, and restart the
 * walk from the top of the table.
 */
static void __rt6_purge_dflt_routers(struct net *net,
				     struct fib6_table *table)
{
	struct fib6_info *rt;

restart:
	rcu_read_lock();
	for_each_fib6_node_rt_rcu(&table->tb6_root) {
		struct net_device *dev = fib6_info_nh_dev(rt);
		struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;

		if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
		    (!idev || idev->cnf.accept_ra != 2) &&
		    fib6_info_hold_safe(rt)) {
			/* must leave the RCU section before deleting;
			 * the held reference keeps rt valid
			 */
			rcu_read_unlock();
			ip6_del_rt(net, rt);
			goto restart;
		}
	}
	rcu_read_unlock();

	table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
}

/* Purge RA-learned default routers from every fib6 table in @net.
 * Only tables flagged RT6_TABLE_HAS_DFLT_ROUTER are scanned.
 */
void rt6_purge_dflt_routers(struct net *net)
{
	struct fib6_table *table;
	struct hlist_head *head;
	unsigned int h;

	rcu_read_lock();

	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
		head = &net->ipv6.fib_table_hash[h];
		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
			if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
				__rt6_purge_dflt_routers(net, table);
		}
	}

	rcu_read_unlock();
}

4258 4259
/* Translate the legacy ioctl route request (struct in6_rtmsg) into the
 * common fib6_config used by ip6_route_add()/ip6_route_del().
 * A zero metric defaults to IP6_RT_PRIO_USER; the table defaults to
 * RT6_TABLE_MAIN unless the interface belongs to an l3mdev.
 */
static void rtmsg_to_fib6_config(struct net *net,
				 struct in6_rtmsg *rtmsg,
				 struct fib6_config *cfg)
{
	*cfg = (struct fib6_config){
		.fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
			 : RT6_TABLE_MAIN,
		.fc_ifindex = rtmsg->rtmsg_ifindex,
		.fc_metric = rtmsg->rtmsg_metric ? : IP6_RT_PRIO_USER,
		.fc_expires = rtmsg->rtmsg_info,
		.fc_dst_len = rtmsg->rtmsg_dst_len,
		.fc_src_len = rtmsg->rtmsg_src_len,
		.fc_flags = rtmsg->rtmsg_flags,
		.fc_type = rtmsg->rtmsg_type,

		.fc_nlinfo.nl_net = net,

		.fc_dst = rtmsg->rtmsg_dst,
		.fc_src = rtmsg->rtmsg_src,
		.fc_gateway = rtmsg->rtmsg_gateway,
	};
}

4281
/* Handle the legacy SIOCADDRT/SIOCDELRT route ioctls.
 *
 * Requires CAP_NET_ADMIN in the netns user namespace.  Copies the
 * in6_rtmsg from userspace, converts it to a fib6_config and performs
 * the add/delete under the RTNL lock.  Returns 0 or a negative errno.
 */
int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct fib6_config cfg;
	struct in6_rtmsg rtmsg;
	int err;

	switch (cmd) {
	case SIOCADDRT:		/* Add a route */
	case SIOCDELRT:		/* Delete a route */
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		/* copy_from_user returns the number of bytes NOT copied */
		err = copy_from_user(&rtmsg, arg,
				     sizeof(struct in6_rtmsg));
		if (err)
			return -EFAULT;

		rtmsg_to_fib6_config(net, &rtmsg, &cfg);

		rtnl_lock();
		switch (cmd) {
		case SIOCADDRT:
			err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
			break;
		case SIOCDELRT:
			err = ip6_route_del(&cfg, NULL);
			break;
		default:
			err = -EINVAL;
		}
		rtnl_unlock();

		return err;
	}

	return -EINVAL;
}

/*
 *	Drop the packet on the floor
 */

4322
/* Drop @skb with an ICMPv6 Destination Unreachable of the given @code,
 * bumping the appropriate SNMP no-route counter.
 *
 * @ipstats_mib_noroutes selects IPSTATS_MIB_INNOROUTES (input path) or
 * IPSTATS_MIB_OUTNOROUTES (output path).  Always returns 0 and consumes
 * the skb.
 */
static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net *net = dev_net(dst->dev);
	struct inet6_dev *idev;
	int type;

	/* for VRF (l3mdev) the dst points at loopback; account the drop
	 * against the real input interface instead
	 */
	if (netif_is_l3_master(skb->dev) &&
	    dst->dev == net->loopback_dev)
		idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
	else
		idev = ip6_dst_idev(dst);

	switch (ipstats_mib_noroutes) {
	case IPSTATS_MIB_INNOROUTES:
		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
		/* an unspecified destination is an address error, not a
		 * routing failure
		 */
		if (type == IPV6_ADDR_ANY) {
			IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
			break;
		}
		/* FALLTHROUGH */
	case IPSTATS_MIB_OUTNOROUTES:
		IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
		break;
	}

	/* Start over by dropping the dst for l3mdev case */
	if (netif_is_l3_master(skb->dev))
		skb_dst_drop(skb);

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
	kfree_skb(skb);
	return 0;
}

4357 4358
/* dst input handler for blackhole routes: drop with "no route" on the
 * input path.
 */
static int ip6_pkt_discard(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
}

E
Eric W. Biederman 已提交
4362
/* dst output handler for blackhole routes: drop with "no route" on the
 * output path.  skb->dev is set to the dst device so the ICMPv6 error
 * is attributed correctly.
 */
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
}

4368 4369
/* dst input handler for prohibit routes: drop with "administratively
 * prohibited" on the input path.
 */
static int ip6_pkt_prohibit(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
}

E
Eric W. Biederman 已提交
4373
/* dst output handler for prohibit routes: drop with "administratively
 * prohibited" on the output path.
 */
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
}

L
Linus Torvalds 已提交
4379 4380 4381 4382
/*
 *	Allocate a dst for local (unicast / anycast) address.
 */

4383 4384 4385 4386
/* Allocate a host (/128) fib6_info for a local unicast or anycast
 * address on @idev's device.
 *
 * The entry is created with RTPROT_KERNEL in the local table (or the
 * device's l3mdev table) and is not inserted into the FIB here;
 * ip6_route_info_create() only builds it.  fc_ignore_dev_down allows
 * creation while the device is still down.  Returns the new entry or
 * an ERR_PTR from ip6_route_info_create().
 */
struct fib6_info *addrconf_f6i_alloc(struct net *net,
				     struct inet6_dev *idev,
				     const struct in6_addr *addr,
				     bool anycast, gfp_t gfp_flags)
{
	struct fib6_config cfg = {
		.fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
		.fc_ifindex = idev->dev->ifindex,
		.fc_flags = RTF_UP | RTF_ADDRCONF | RTF_NONEXTHOP,
		.fc_dst = *addr,
		.fc_dst_len = 128,
		.fc_protocol = RTPROT_KERNEL,
		.fc_nlinfo.nl_net = net,
		.fc_ignore_dev_down = true,
	};

	if (anycast) {
		cfg.fc_type = RTN_ANYCAST;
		cfg.fc_flags |= RTF_ANYCAST;
	} else {
		cfg.fc_type = RTN_LOCAL;
		cfg.fc_flags |= RTF_LOCAL;
	}

	return ip6_route_info_create(&cfg, gfp_flags, NULL);
}

4410 4411 4412 4413 4414 4415 4416
/* remove deleted ip from prefsrc entries */
struct arg_dev_net_ip {
	struct net_device *dev;	/* device the address lived on; NULL = any */
	struct net *net;	/* owning network namespace */
	struct in6_addr *addr;	/* preferred source address being removed */
};
4417
/* fib6_clean_all() callback: clear the preferred-source address of any
 * route that still references the address being removed (see
 * struct arg_dev_net_ip).  Always returns 0 so the walk continues.
 */
static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
{
	struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
	struct net *net = ((struct arg_dev_net_ip *)arg)->net;
	struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;

	/* skip nexthop-object routes and the null entry; a NULL dev
	 * matches routes on any device
	 */
	if (!rt->nh &&
	    ((void *)rt->fib6_nh->fib_nh_dev == dev || !dev) &&
	    rt != net->ipv6.fib6_null_entry &&
	    ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
		spin_lock_bh(&rt6_exception_lock);
		/* remove prefsrc entry */
		rt->fib6_prefsrc.plen = 0;
		spin_unlock_bh(&rt6_exception_lock);
	}
	return 0;
}

/* Scrub the address in @ifp (which is being deleted) from the prefsrc
 * field of every route in its namespace.
 */
void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
{
	struct net *net = dev_net(ifp->idev->dev);
	struct arg_dev_net_ip adni = {
		.dev = ifp->idev->dev,
		.net = net,
		.addr = &ifp->addr,
	};
	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
}

4446
#define RTF_RA_ROUTER		(RTF_ADDRCONF | RTF_DEFAULT)

/* Remove routers and update dst entries when gateway turn into host. */
/* fib6_clean_all() callback: returns -1 (delete) for an RA default
 * router entry whose gateway is @arg; otherwise prunes matching cached
 * exception routes and returns 0 (keep).
 */
static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
{
	struct in6_addr *gateway = (struct in6_addr *)arg;
	struct fib6_nh *nh;

	/* RA routes do not use nexthops */
	if (rt->nh)
		return 0;

	nh = rt->fib6_nh;
	if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
	    nh->fib_nh_gw_family && ipv6_addr_equal(gateway, &nh->fib_nh_gw6))
		return -1;

	/* Further clean up cached routes in exception table.
	 * This is needed because cached route may have a different
	 * gateway than its 'parent' in the case of an ip redirect.
	 */
	fib6_nh_exceptions_clean_tohost(nh, gateway);

	return 0;
}

/* Drop RA default-router state for @gateway, which has stopped
 * advertising itself as a router (router lifetime went to zero).
 */
void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
{
	fib6_clean_all(net, fib6_clean_tohost, gateway);
}

4477 4478
/* Argument for the fib6 walkers reacting to netdevice events.  The
 * union member in use depends on the callback: fib6_ifup() reads
 * nh_flags, fib6_ifdown() reads event.
 */
struct arg_netdev_event {
	const struct net_device *dev;
	union {
		unsigned char nh_flags;
		unsigned long event;
	};
};

4485
/* Return the first ECMP-eligible route of @rt's multipath group (the
 * sibling with the same metric closest to the head of the node's route
 * list), or NULL if none is found.  Must be called with the owning
 * table's tb6_lock held (enforced via lockdep in the dereferences).
 */
static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
{
	struct fib6_info *iter;
	struct fib6_node *fn;

	fn = rcu_dereference_protected(rt->fib6_node,
			lockdep_is_held(&rt->fib6_table->tb6_lock));
	iter = rcu_dereference_protected(fn->leaf,
			lockdep_is_held(&rt->fib6_table->tb6_lock));
	while (iter) {
		if (iter->fib6_metric == rt->fib6_metric &&
		    rt6_qualify_for_ecmp(iter))
			return iter;
		iter = rcu_dereference_protected(iter->fib6_next,
				lockdep_is_held(&rt->fib6_table->tb6_lock));
	}

	return NULL;
}

4505
/* only called for fib entries with builtin fib6_nh */
/* A nexthop is dead if flagged RTNH_F_DEAD, or if it is link-down and
 * the device ignores link-down nexthops.
 */
static bool rt6_is_dead(const struct fib6_info *rt)
{
	if (rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD ||
	    (rt->fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN &&
	     ip6_ignore_linkdown(rt->fib6_nh->fib_nh_dev)))
		return true;

	return false;
}

4516
/* Sum the nexthop weights of all live (non-dead) members of @rt's
 * multipath group, @rt itself included.  @rt must be the first sibling.
 */
static int rt6_multipath_total_weight(const struct fib6_info *rt)
{
	struct fib6_info *iter;
	int total = 0;

	if (!rt6_is_dead(rt))
		total += rt->fib6_nh->fib_nh_weight;

	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
		if (!rt6_is_dead(iter))
			total += iter->fib6_nh->fib_nh_weight;
	}

	return total;
}

4532
/* Assign @rt's hash-threshold upper bound from the running cumulative
 * @weight out of @total.  Dead nexthops get -1 so they are never
 * selected.  The bound is the cumulative weight scaled into the 31-bit
 * hash space, minus one.
 */
static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
{
	int upper_bound = -1;

	if (!rt6_is_dead(rt)) {
		*weight += rt->fib6_nh->fib_nh_weight;
		upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
						    total) - 1;
	}
	atomic_set(&rt->fib6_nh->fib_nh_upper_bound, upper_bound);
}

4544
/* Recompute the hash-threshold upper bounds for every member of @rt's
 * multipath group in list order.  @rt must be the first sibling and
 * @total the group's live weight (rt6_multipath_total_weight()).
 */
static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
{
	struct fib6_info *iter;
	int weight = 0;

	rt6_upper_bound_set(rt, &weight, total);

	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
		rt6_upper_bound_set(iter, &weight, total);
}

4555
/* Rebalance the hash-threshold upper bounds of @rt's multipath group
 * after a nexthop changed state.  No-op for non-multipath routes or
 * groups already marked for flushing.
 */
void rt6_multipath_rebalance(struct fib6_info *rt)
{
	struct fib6_info *first;
	int total;

	/* In case the entire multipath route was marked for flushing,
	 * then there is no need to rebalance upon the removal of every
	 * sibling route.
	 */
	if (!rt->fib6_nsiblings || rt->should_flush)
		return;

	/* During lookup routes are evaluated in order, so we need to
	 * make sure upper bounds are assigned from the first sibling
	 * onwards.
	 */
	first = rt6_multipath_first_sibling(rt);
	if (WARN_ON_ONCE(!first))
		return;

	total = rt6_multipath_total_weight(first);
	rt6_multipath_upper_bound_set(first, total);
}

4579
/* fib6_clean_all() callback for rt6_sync_up(): clear the given nexthop
 * flags on routes using the device that came (back) up, bump the FIB
 * serial number so cached lookups revalidate, and rebalance the
 * multipath group.  Always returns 0 (never deletes).
 */
static int fib6_ifup(struct fib6_info *rt, void *p_arg)
{
	const struct arg_netdev_event *arg = p_arg;
	struct net *net = dev_net(arg->dev);

	/* nexthop-object routes are handled by the nexthop code */
	if (rt != net->ipv6.fib6_null_entry && !rt->nh &&
	    rt->fib6_nh->fib_nh_dev == arg->dev) {
		rt->fib6_nh->fib_nh_flags &= ~arg->nh_flags;
		fib6_update_sernum_upto_root(net, rt);
		rt6_multipath_rebalance(rt);
	}

	return 0;
}

4594
/* Device came up (or carrier returned): clear @nh_flags on all routes
 * via @dev.  When clearing RTNH_F_DEAD with carrier present, LINKDOWN
 * is cleared as well.
 */
void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)
{
	struct arg_netdev_event arg = {
		.dev = dev,
		{
			.nh_flags = nh_flags,
		},
	};

	if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
		arg.nh_flags |= RTNH_F_LINKDOWN;

	fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
}

4609
/* only called for fib entries with inline fib6_nh */
/* True if @rt or any of its multipath siblings has a nexthop on @dev. */
static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
				   const struct net_device *dev)
{
	struct fib6_info *iter;

	if (rt->fib6_nh->fib_nh_dev == dev)
		return true;
	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
		if (iter->fib6_nh->fib_nh_dev == dev)
			return true;

	return false;
}

4624
/* Mark @rt and all its multipath siblings for deletion; fib6_ifdown()
 * then reports -1 for each member on subsequent walker visits.
 */
static void rt6_multipath_flush(struct fib6_info *rt)
{
	struct fib6_info *iter;

	rt->should_flush = 1;
	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
		iter->should_flush = 1;
}

4633
/* Count members of @rt's multipath group (including @rt) that are
 * either on @down_dev or already flagged RTNH_F_DEAD.
 */
static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
					     const struct net_device *down_dev)
{
	struct fib6_info *iter;
	unsigned int dead = 0;

	if (rt->fib6_nh->fib_nh_dev == down_dev ||
	    rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
		dead++;
	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
		if (iter->fib6_nh->fib_nh_dev == down_dev ||
		    iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
			dead++;

	return dead;
}

4650
/* Set @nh_flags on every member of @rt's multipath group (including
 * @rt) whose nexthop device is @dev.
 */
static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
				       const struct net_device *dev,
				       unsigned char nh_flags)
{
	struct fib6_info *iter;

	if (rt->fib6_nh->fib_nh_dev == dev)
		rt->fib6_nh->fib_nh_flags |= nh_flags;
	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
		if (iter->fib6_nh->fib_nh_dev == dev)
			iter->fib6_nh->fib_nh_flags |= nh_flags;
}

4663
/* called with write lock held for table with rt */
/* fib6_clean_all() callback for rt6_sync_down_dev().
 *
 * Return convention understood by the fib6 walker:
 *   -1  delete this route,
 *   -2  delete this route but skip the notification,
 *    0  keep it.
 *
 * NETDEV_UNREGISTER deletes any route on the device.  NETDEV_DOWN
 * deletes single-nexthop routes on the device; for multipath groups it
 * either flushes the whole group (when every member would be dead) or
 * just marks the affected nexthops DEAD|LINKDOWN and rebalances.
 * NETDEV_CHANGE (carrier loss) only marks LINKDOWN.
 */
static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
{
	const struct arg_netdev_event *arg = p_arg;
	const struct net_device *dev = arg->dev;
	struct net *net = dev_net(dev);

	/* nexthop-object routes are handled by the nexthop code */
	if (rt == net->ipv6.fib6_null_entry || rt->nh)
		return 0;

	switch (arg->event) {
	case NETDEV_UNREGISTER:
		return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
	case NETDEV_DOWN:
		if (rt->should_flush)
			return -1;
		if (!rt->fib6_nsiblings)
			return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
		if (rt6_multipath_uses_dev(rt, dev)) {
			unsigned int count;

			count = rt6_multipath_dead_count(rt, dev);
			/* +1: the group's member count is nsiblings plus
			 * this route itself
			 */
			if (rt->fib6_nsiblings + 1 == count) {
				rt6_multipath_flush(rt);
				return -1;
			}
			rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
						   RTNH_F_LINKDOWN);
			fib6_update_sernum(net, rt);
			rt6_multipath_rebalance(rt);
		}
		return -2;
	case NETDEV_CHANGE:
		if (rt->fib6_nh->fib_nh_dev != dev ||
		    rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
			break;
		rt->fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
		rt6_multipath_rebalance(rt);
		break;
	}

	return 0;
}

4707
/* Propagate a device-down/unregister/change @event to all routes via
 * @dev.  Honors the skip_notify_on_dev_down sysctl by using the
 * notification-suppressing cleaner when set.
 */
void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
{
	struct arg_netdev_event arg = {
		.dev = dev,
		{
			.event = event,
		},
	};
	struct net *net = dev_net(dev);

	if (net->ipv6.sysctl.skip_notify_on_dev_down)
		fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
	else
		fib6_clean_all(net, fib6_ifdown, &arg);
}

/* Tear down IPv6 routing state for @dev: sync routes for @event, flush
 * uncached (percpu/exception) dsts referencing the device, and drop its
 * neighbour entries.
 */
void rt6_disable_ip(struct net_device *dev, unsigned long event)
{
	rt6_sync_down_dev(dev, event);
	rt6_uncached_list_flush_dev(dev_net(dev), dev);
	neigh_ifdown(&nd_tbl, dev);
}

4730
/* Walker argument for rt6_mtu_change(). */
struct rt6_mtu_change_arg {
	struct net_device *dev;	/* device whose MTU changed */
	unsigned int mtu;	/* new MTU */
	struct fib6_info *f6i;	/* route currently being processed */
};

4736
/* Per-nexthop worker for rt6_mtu_change_route(): update the route's
 * RTAX_MTU metric and its cached exception routes when the nexthop is
 * on the device whose MTU changed.  Always returns 0.
 */
static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg)
{
	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *)_arg;
	struct fib6_info *f6i = arg->f6i;

	/* For administrative MTU increase, there is no way to discover
	 * IPv6 PMTU increase, so PMTU increase should be updated here.
	 * Since RFC 1981 doesn't include administrative MTU increase
	 * update PMTU increase is a MUST. (i.e. jumbo frame)
	 */
	if (nh->fib_nh_dev == arg->dev) {
		struct inet6_dev *idev = __in6_dev_get(arg->dev);
		u32 mtu = f6i->fib6_pmtu;

		/* shrink to the new device MTU, or grow a route that was
		 * tracking the old device MTU
		 */
		if (mtu >= arg->mtu ||
		    (mtu < arg->mtu && mtu == idev->cnf.mtu6))
			fib6_metric_set(f6i, RTAX_MTU, arg->mtu);

		spin_lock_bh(&rt6_exception_lock);
		rt6_exceptions_update_pmtu(idev, nh, arg->mtu);
		spin_unlock_bh(&rt6_exception_lock);
	}

	return 0;
}

/* fib6_clean_all() callback for rt6_mtu_change(): apply the MTU change
 * to @f6i's nexthop(s) unless the route's MTU metric is locked.
 * Always returns 0 (never deletes routes).
 */
static int rt6_mtu_change_route(struct fib6_info *f6i, void *p_arg)
{
	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
	struct inet6_dev *idev;

	/* In IPv6 pmtu discovery is not optional,
	   so that RTAX_MTU lock cannot disable it.
	   We still use this lock to block changes
	   caused by addrconf/ndisc.
	*/

	idev = __in6_dev_get(arg->dev);
	if (!idev)
		return 0;

	if (fib6_metric_locked(f6i, RTAX_MTU))
		return 0;

	arg->f6i = f6i;
	if (f6i->nh) {
		/* fib6_nh_mtu_change only returns 0, so this is safe */
		return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_mtu_change,
						arg);
	}

	return fib6_nh_mtu_change(f6i->fib6_nh, arg);
}

4790
/* Device MTU changed: walk all routes in @dev's namespace and update
 * their MTU metrics and cached PMTU exceptions.
 */
void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
{
	struct rt6_mtu_change_arg arg = {
		.dev = dev,
		.mtu = mtu,
	};

	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
}

4800
/* Netlink attribute policy for RTM_NEWROUTE/RTM_DELROUTE/RTM_GETROUTE.
 * strict_start_type makes attributes above RTA_DPORT subject to strict
 * validation even on the deprecated parse path.
 */
static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
	[RTA_UNSPEC]		= { .strict_start_type = RTA_DPORT + 1 },
	[RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
	[RTA_PREFSRC]		= { .len = sizeof(struct in6_addr) },
	[RTA_OIF]               = { .type = NLA_U32 },
	[RTA_IIF]		= { .type = NLA_U32 },
	[RTA_PRIORITY]          = { .type = NLA_U32 },
	[RTA_METRICS]           = { .type = NLA_NESTED },
	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
	[RTA_PREF]              = { .type = NLA_U8 },
	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[RTA_ENCAP]		= { .type = NLA_NESTED },
	[RTA_EXPIRES]		= { .type = NLA_U32 },
	[RTA_UID]		= { .type = NLA_U32 },
	[RTA_MARK]		= { .type = NLA_U32 },
	[RTA_TABLE]		= { .type = NLA_U32 },
	[RTA_IP_PROTO]		= { .type = NLA_U8 },
	[RTA_SPORT]		= { .type = NLA_U16 },
	[RTA_DPORT]		= { .type = NLA_U16 },
	[RTA_NH_ID]		= { .type = NLA_U32 },
};

/* Parse an RTM_NEWROUTE/RTM_DELROUTE netlink message into a
 * fib6_config.
 *
 * Validates attributes against rtm_ipv6_policy, maps the rtm header
 * fields and each recognized attribute onto @cfg, and rejects invalid
 * combinations (RTA_NH_ID together with an explicit nexthop spec,
 * RTA_VIA, truncated RTA_DST/RTA_SRC).  Note fc_mx/fc_mp keep pointers
 * into the skb's attribute data rather than copies.  Returns 0 or a
 * negative errno, setting @extack on rejection.
 */
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct fib6_config *cfg,
			      struct netlink_ext_ack *extack)
{
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	unsigned int pref;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
				     rtm_ipv6_policy, extack);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	rtm = nlmsg_data(nlh);

	*cfg = (struct fib6_config){
		.fc_table = rtm->rtm_table,
		.fc_dst_len = rtm->rtm_dst_len,
		.fc_src_len = rtm->rtm_src_len,
		.fc_flags = RTF_UP,
		.fc_protocol = rtm->rtm_protocol,
		.fc_type = rtm->rtm_type,

		.fc_nlinfo.portid = NETLINK_CB(skb).portid,
		.fc_nlinfo.nlh = nlh,
		.fc_nlinfo.nl_net = sock_net(skb->sk),
	};

	if (rtm->rtm_type == RTN_UNREACHABLE ||
	    rtm->rtm_type == RTN_BLACKHOLE ||
	    rtm->rtm_type == RTN_PROHIBIT ||
	    rtm->rtm_type == RTN_THROW)
		cfg->fc_flags |= RTF_REJECT;

	if (rtm->rtm_type == RTN_LOCAL)
		cfg->fc_flags |= RTF_LOCAL;

	if (rtm->rtm_flags & RTM_F_CLONED)
		cfg->fc_flags |= RTF_CACHE;

	cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);

	if (tb[RTA_NH_ID]) {
		/* a nexthop object reference replaces any inline
		 * nexthop description
		 */
		if (tb[RTA_GATEWAY]   || tb[RTA_OIF] ||
		    tb[RTA_MULTIPATH] || tb[RTA_ENCAP]) {
			NL_SET_ERR_MSG(extack,
				       "Nexthop specification and nexthop id are mutually exclusive");
			goto errout;
		}
		cfg->fc_nh_id = nla_get_u32(tb[RTA_NH_ID]);
	}

	if (tb[RTA_GATEWAY]) {
		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
		cfg->fc_flags |= RTF_GATEWAY;
	}
	if (tb[RTA_VIA]) {
		NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
		goto errout;
	}

	if (tb[RTA_DST]) {
		int plen = (rtm->rtm_dst_len + 7) >> 3;

		if (nla_len(tb[RTA_DST]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
	}

	if (tb[RTA_SRC]) {
		int plen = (rtm->rtm_src_len + 7) >> 3;

		if (nla_len(tb[RTA_SRC]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
	}

	if (tb[RTA_PREFSRC])
		cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);

	if (tb[RTA_OIF])
		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_PRIORITY])
		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);

	if (tb[RTA_METRICS]) {
		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
	}

	if (tb[RTA_TABLE])
		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);

	if (tb[RTA_MULTIPATH]) {
		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);

		err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
						     cfg->fc_mp_len, extack);
		if (err < 0)
			goto errout;
	}

	if (tb[RTA_PREF]) {
		/* unknown preference values degrade to MEDIUM (RFC 4191) */
		pref = nla_get_u8(tb[RTA_PREF]);
		if (pref != ICMPV6_ROUTER_PREF_LOW &&
		    pref != ICMPV6_ROUTER_PREF_HIGH)
			pref = ICMPV6_ROUTER_PREF_MEDIUM;
		cfg->fc_flags |= RTF_PREF(pref);
	}

	if (tb[RTA_ENCAP])
		cfg->fc_encap = tb[RTA_ENCAP];

	if (tb[RTA_ENCAP_TYPE]) {
		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);

		err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
		if (err < 0)
			goto errout;
	}

	if (tb[RTA_EXPIRES]) {
		unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);

		if (addrconf_finite_timeout(timeout)) {
			cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
			cfg->fc_flags |= RTF_EXPIRES;
		}
	}

	err = 0;
errout:
	return err;
}

4963
/* One pending nexthop while building a multipath route from an
 * RTA_MULTIPATH request (see ip6_route_multipath_add()).
 */
struct rt6_nh {
	struct fib6_info *fib6_info;	/* route built for this nexthop */
	struct fib6_config r_cfg;	/* per-nexthop config (for rollback) */
	struct list_head next;
};

4969 4970
/* Append @rt to @rt6_nh_list unless an equivalent nexthop is already
 * queued.  Returns 0 on success, -EEXIST on duplicate, -ENOMEM on
 * allocation failure.  On success the list entry takes over the
 * caller's reference to @rt.
 */
static int ip6_route_info_append(struct net *net,
				 struct list_head *rt6_nh_list,
				 struct fib6_info *rt,
				 struct fib6_config *r_cfg)
{
	struct rt6_nh *nh;
	int err = -EEXIST;

	list_for_each_entry(nh, rt6_nh_list, next) {
		/* check if fib6_info already exists */
		if (rt6_duplicate_nexthop(nh->fib6_info, rt))
			return err;
	}

	nh = kzalloc(sizeof(*nh), GFP_KERNEL);
	if (!nh)
		return -ENOMEM;
	nh->fib6_info = rt;
	memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
	list_add_tail(&nh->next, rt6_nh_list);

	return 0;
}

4993 4994
/* Send a single RTM_NEWROUTE notification for a multipath operation. */
static void ip6_route_mpath_notify(struct fib6_info *rt,
				   struct fib6_info *rt_last,
				   struct nl_info *info,
				   __u16 nlflags)
{
	/* if this is an APPEND route, then rt points to the first route
	 * inserted and rt_last points to last route inserted. Userspace
	 * wants a consistent dump of the route which starts at the first
	 * nexthop. Since sibling routes are always added at the end of
	 * the list, find the first sibling of the last route appended
	 */
	if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
		rt = list_first_entry(&rt_last->fib6_siblings,
				      struct fib6_info,
				      fib6_siblings);
	}

	if (rt)
		inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
}

5014 5015
/* Add (or replace/append) a multipath route described by the
 * RTA_MULTIPATH attribute in @cfg.
 *
 * Two phases: first every rtnexthop entry is parsed and a fib6_info is
 * created and queued on rt6_nh_list; then the queued routes are
 * inserted one by one.  Per-route netlink and in-kernel notifications
 * are suppressed so a single coherent notification covering the whole
 * group can be sent at the end.  On a mid-insertion failure, the routes
 * inserted so far are notified and then deleted again, so the FIB is
 * left unchanged.  Returns 0 or a negative errno.
 */
static int ip6_route_multipath_add(struct fib6_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct fib6_info *rt_notif = NULL, *rt_last = NULL;
	struct nl_info *info = &cfg->fc_nlinfo;
	enum fib_event_type event_type;
	struct fib6_config r_cfg;
	struct rtnexthop *rtnh;
	struct fib6_info *rt;
	struct rt6_nh *err_nh;
	struct rt6_nh *nh, *nh_safe;
	__u16 nlflags;
	int remaining;
	int attrlen;
	int err = 1;
	int nhn = 0;
	int replace = (cfg->fc_nlinfo.nlh &&
		       (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
	LIST_HEAD(rt6_nh_list);

	nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
	if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
		nlflags |= NLM_F_APPEND;

	remaining = cfg->fc_mp_len;
	rtnh = (struct rtnexthop *)cfg->fc_mp;

	/* Parse a Multipath Entry and build a list (rt6_nh_list) of
	 * fib6_info structs per nexthop
	 */
	while (rtnh_ok(rtnh, remaining)) {
		/* per-nexthop config starts as a copy of the route-wide
		 * config and is overridden by the rtnexthop's attributes
		 */
		memcpy(&r_cfg, cfg, sizeof(*cfg));
		if (rtnh->rtnh_ifindex)
			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla) {
				r_cfg.fc_gateway = nla_get_in6_addr(nla);
				r_cfg.fc_flags |= RTF_GATEWAY;
			}
			r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
			nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
			if (nla)
				r_cfg.fc_encap_type = nla_get_u16(nla);
		}

		r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
		rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			goto cleanup;
		}
		if (!rt6_qualify_for_ecmp(rt)) {
			err = -EINVAL;
			NL_SET_ERR_MSG(extack,
				       "Device only routes can not be added for IPv6 using the multipath API.");
			fib6_info_release(rt);
			goto cleanup;
		}

		rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1;

		err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
					    rt, &r_cfg);
		if (err) {
			fib6_info_release(rt);
			goto cleanup;
		}

		rtnh = rtnh_next(rtnh, &remaining);
	}

	if (list_empty(&rt6_nh_list)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid nexthop configuration - no valid nexthops");
		return -EINVAL;
	}

	/* for add and replace send one notification with all nexthops.
	 * Skip the notification in fib6_add_rt2node and send one with
	 * the full route when done
	 */
	info->skip_notify = 1;

	/* For add and replace, send one notification with all nexthops. For
	 * append, send one notification with all appended nexthops.
	 */
	info->skip_notify_kernel = 1;

	err_nh = NULL;
	list_for_each_entry(nh, &rt6_nh_list, next) {
		err = __ip6_ins_rt(nh->fib6_info, info, extack);
		fib6_info_release(nh->fib6_info);

		if (!err) {
			/* save reference to last route successfully inserted */
			rt_last = nh->fib6_info;

			/* save reference to first route for notification */
			if (!rt_notif)
				rt_notif = nh->fib6_info;
		}

		/* nh->fib6_info is used or freed at this point, reset to NULL*/
		nh->fib6_info = NULL;
		if (err) {
			if (replace && nhn)
				NL_SET_ERR_MSG_MOD(extack,
						   "multipath route replace failed (check consistency of installed routes)");
			err_nh = nh;
			goto add_errout;
		}

		/* Because each route is added like a single route we remove
		 * these flags after the first nexthop: if there is a collision,
		 * we have already failed to add the first nexthop:
		 * fib6_add_rt2node() has rejected it; when replacing, old
		 * nexthops have been replaced by first new, the rest should
		 * be added to it.
		 */
		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
						     NLM_F_REPLACE);
		nhn++;
	}

	event_type = replace ? FIB_EVENT_ENTRY_REPLACE : FIB_EVENT_ENTRY_ADD;
	err = call_fib6_multipath_entry_notifiers(info->nl_net, event_type,
						  rt_notif, nhn - 1, extack);
	if (err) {
		/* Delete all the siblings that were just added */
		err_nh = NULL;
		goto add_errout;
	}

	/* success ... tell user about new route */
	ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
	goto cleanup;

add_errout:
	/* send notification for routes that were added so that
	 * the delete notifications sent by ip6_route_del are
	 * coherent
	 */
	if (rt_notif)
		ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);

	/* Delete routes that were already added */
	list_for_each_entry(nh, &rt6_nh_list, next) {
		if (err_nh == nh)
			break;
		ip6_route_del(&nh->r_cfg, extack);
	}

cleanup:
	list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
		if (nh->fib6_info)
			fib6_info_release(nh->fib6_info);
		list_del(&nh->next);
		kfree(nh);
	}

	return err;
}

5183 5184
static int ip6_route_multipath_del(struct fib6_config *cfg,
				   struct netlink_ext_ack *extack)
5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210
{
	struct fib6_config r_cfg;
	struct rtnexthop *rtnh;
	int remaining;
	int attrlen;
	int err = 1, last_err = 0;

	remaining = cfg->fc_mp_len;
	rtnh = (struct rtnexthop *)cfg->fc_mp;

	/* Parse a Multipath Entry */
	while (rtnh_ok(rtnh, remaining)) {
		memcpy(&r_cfg, cfg, sizeof(*cfg));
		if (rtnh->rtnh_ifindex)
			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla) {
				nla_memcpy(&r_cfg.fc_gateway, nla, 16);
				r_cfg.fc_flags |= RTF_GATEWAY;
			}
		}
5211
		err = ip6_route_del(&r_cfg, extack);
5212 5213 5214
		if (err)
			last_err = err;

5215 5216 5217 5218 5219 5220
		rtnh = rtnh_next(rtnh, &remaining);
	}

	return last_err;
}

5221 5222
static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
L
Linus Torvalds 已提交
5223
{
5224 5225
	struct fib6_config cfg;
	int err;
L
Linus Torvalds 已提交
5226

5227
	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5228 5229 5230
	if (err < 0)
		return err;

5231 5232 5233 5234 5235 5236
	if (cfg.fc_nh_id &&
	    !nexthop_find_by_id(sock_net(skb->sk), cfg.fc_nh_id)) {
		NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
		return -EINVAL;
	}

5237
	if (cfg.fc_mp)
5238
		return ip6_route_multipath_del(&cfg, extack);
5239 5240
	else {
		cfg.fc_delete_all_nh = 1;
5241
		return ip6_route_del(&cfg, extack);
5242
	}
L
Linus Torvalds 已提交
5243 5244
}

5245 5246
static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
L
Linus Torvalds 已提交
5247
{
5248 5249
	struct fib6_config cfg;
	int err;
L
Linus Torvalds 已提交
5250

5251
	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5252 5253 5254
	if (err < 0)
		return err;

5255 5256 5257
	if (cfg.fc_metric == 0)
		cfg.fc_metric = IP6_RT_PRIO_USER;

5258
	if (cfg.fc_mp)
5259
		return ip6_route_multipath_add(&cfg, extack);
5260
	else
5261
		return ip6_route_add(&cfg, GFP_KERNEL, extack);
L
Linus Torvalds 已提交
5262 5263
}

5264 5265
/* add the overhead of this fib6_nh to nexthop_len
 *
 * Callback for nexthop_for_each_fib6_nh(): accumulates into *arg the
 * netlink message space one nexthop contributes when dumped inside
 * RTA_MULTIPATH.  Always returns 0 so iteration covers every fib6_nh.
 */
static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg)
{
	int *nexthop_len = arg;

	*nexthop_len += nla_total_size(0)	 /* RTA_MULTIPATH */
		     + NLA_ALIGN(sizeof(struct rtnexthop))
		     + nla_total_size(16); /* RTA_GATEWAY */

	if (nh->fib_nh_lws) {
		/* lwtunnel encap state: driver-reported attribute payload
		 * plus the u16 RTA_ENCAP_TYPE attribute
		 */
		*nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
		*nexthop_len += nla_total_size(2);
	}

	return 0;
}
5282

5283 5284 5285 5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302 5303
/* Upper bound on the netlink message size needed to dump @f6i.
 * Used by the notification paths to size the skb before calling
 * rt6_fill_node(); an undersized estimate would trip the -EMSGSIZE
 * WARN_ON in the callers.
 */
static size_t rt6_nlmsg_size(struct fib6_info *f6i)
{
	int nexthop_len;

	if (f6i->nh) {
		/* route uses a nexthop object: RTA_NH_ID plus whatever
		 * each member fib6_nh contributes
		 */
		nexthop_len = nla_total_size(4); /* RTA_NH_ID */
		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
					 &nexthop_len);
	} else {
		struct fib6_nh *nh = f6i->fib6_nh;

		nexthop_len = 0;
		if (f6i->fib6_nsiblings) {
			/* legacy multipath: one rtnexthop per sibling,
			 * all assumed as large as the first nexthop
			 */
			nexthop_len = nla_total_size(0)	 /* RTA_MULTIPATH */
				    + NLA_ALIGN(sizeof(struct rtnexthop))
				    + nla_total_size(16) /* RTA_GATEWAY */
				    + lwtunnel_get_encap_size(nh->fib_nh_lws);

			nexthop_len *= f6i->fib6_nsiblings;
		}
		nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
	}

	return NLMSG_ALIGN(sizeof(struct rtmsg))
	       + nla_total_size(16) /* RTA_SRC */
	       + nla_total_size(16) /* RTA_DST */
	       + nla_total_size(16) /* RTA_GATEWAY */
	       + nla_total_size(16) /* RTA_PREFSRC */
	       + nla_total_size(4) /* RTA_TABLE */
	       + nla_total_size(4) /* RTA_IIF */
	       + nla_total_size(4) /* RTA_OIF */
	       + nla_total_size(4) /* RTA_PRIORITY */
	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
	       + nla_total_size(sizeof(struct rta_cacheinfo))
	       + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
	       + nla_total_size(1) /* RTA_PREF */
	       + nexthop_len;
}

5322 5323 5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350
/* Dump the nexthop-object portion of a route.  A multipath object is
 * wrapped in an RTA_MULTIPATH nest; a single-path object is emitted
 * inline and may set dead/linkdown bits in *flags.
 * Returns 0 on success, -EMSGSIZE if the skb ran out of room.
 */
static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
				 unsigned char *flags)
{
	struct nlattr *mp;

	if (!nexthop_is_multipath(nh)) {
		struct fib6_nh *fib6_nh = nexthop_fib6_nh(nh);

		if (fib_nexthop_info(skb, &fib6_nh->nh_common,
				     flags, false) < 0)
			return -EMSGSIZE;

		return 0;
	}

	mp = nla_nest_start(skb, RTA_MULTIPATH);
	if (!mp || nexthop_mpath_fill_node(skb, nh))
		return -EMSGSIZE;

	nla_nest_end(skb, mp);
	return 0;
}

5351
/* Fill one RTM_NEWROUTE/RTM_DELROUTE message for @rt into @skb.
 *
 * @dst:  optional cached rt6_info (exception/clone); when set, address,
 *        flag, metric and expiry data come from the cache entry rather
 *        than the FIB entry.
 * @dest/@src: optional specific addresses (getroute replies) — when
 *        given, the prefix lengths are reported as /128.
 * @iif:  input interface for input-route replies; 0 for dumps/output.
 *
 * Returns 0 on success, -EMSGSIZE if @skb has no room (message is
 * cancelled), or the ip6mr error for multicast resolution.
 */
static int rt6_fill_node(struct net *net, struct sk_buff *skb,
			 struct fib6_info *rt, struct dst_entry *dst,
			 struct in6_addr *dest, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags)
{
	struct rt6_info *rt6 = (struct rt6_info *)dst;
	struct rt6key *rt6_dst, *rt6_src;
	u32 *pmetrics, table, rt6_flags;
	unsigned char nh_flags = 0;
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	long expires = 0;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	/* prefer the cache entry's keys/flags when one was supplied */
	if (rt6) {
		rt6_dst = &rt6->rt6i_dst;
		rt6_src = &rt6->rt6i_src;
		rt6_flags = rt6->rt6i_flags;
	} else {
		rt6_dst = &rt->fib6_dst;
		rt6_src = &rt->fib6_src;
		rt6_flags = rt->fib6_flags;
	}

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET6;
	rtm->rtm_dst_len = rt6_dst->plen;
	rtm->rtm_src_len = rt6_src->plen;
	rtm->rtm_tos = 0;
	if (rt->fib6_table)
		table = rt->fib6_table->tb6_id;
	else
		table = RT6_TABLE_UNSPEC;
	/* legacy 8-bit field; full id always goes in RTA_TABLE */
	rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, table))
		goto nla_put_failure;

	rtm->rtm_type = rt->fib6_type;
	rtm->rtm_flags = 0;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	rtm->rtm_protocol = rt->fib6_protocol;

	if (rt6_flags & RTF_CACHE)
		rtm->rtm_flags |= RTM_F_CLONED;

	if (dest) {
		if (nla_put_in6_addr(skb, RTA_DST, dest))
			goto nla_put_failure;
		rtm->rtm_dst_len = 128;
	} else if (rtm->rtm_dst_len)
		if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
			goto nla_put_failure;
#ifdef CONFIG_IPV6_SUBTREES
	if (src) {
		if (nla_put_in6_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
		rtm->rtm_src_len = 128;
	} else if (rtm->rtm_src_len &&
		   nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
		goto nla_put_failure;
#endif
	if (iif) {
#ifdef CONFIG_IPV6_MROUTE
		/* multicast destinations are resolved via the mroute code,
		 * which fills the message itself (0 = done, <0 = error)
		 */
		if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
			int err = ip6mr_get_route(net, skb, rtm, portid);

			if (err == 0)
				return 0;
			if (err < 0)
				goto nla_put_failure;
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, iif))
				goto nla_put_failure;
	} else if (dest) {
		struct in6_addr saddr_buf;
		if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
		    nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	if (rt->fib6_prefsrc.plen) {
		struct in6_addr saddr_buf;
		saddr_buf = rt->fib6_prefsrc.addr;
		if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	/* cached dst carries its own metrics; otherwise use the FIB's */
	pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
	if (rtnetlink_put_metrics(skb, pmetrics) < 0)
		goto nla_put_failure;

	if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
		goto nla_put_failure;

	/* For multipath routes, walk the siblings list and add
	 * each as a nexthop within RTA_MULTIPATH.
	 */
	if (rt6) {
		if (rt6_flags & RTF_GATEWAY &&
		    nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
			goto nla_put_failure;

		if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
			goto nla_put_failure;
	} else if (rt->fib6_nsiblings) {
		struct fib6_info *sibling, *next_sibling;
		struct nlattr *mp;

		mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
		if (!mp)
			goto nla_put_failure;

		if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
				    rt->fib6_nh->fib_nh_weight) < 0)
			goto nla_put_failure;

		list_for_each_entry_safe(sibling, next_sibling,
					 &rt->fib6_siblings, fib6_siblings) {
			if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
					    sibling->fib6_nh->fib_nh_weight) < 0)
				goto nla_put_failure;
		}

		nla_nest_end(skb, mp);
	} else if (rt->nh) {
		if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
			goto nla_put_failure;

		if (nexthop_is_blackhole(rt->nh))
			rtm->rtm_type = RTN_BLACKHOLE;

		if (rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
			goto nla_put_failure;

		rtm->rtm_flags |= nh_flags;
	} else {
		if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common,
				     &nh_flags, false) < 0)
			goto nla_put_failure;

		rtm->rtm_flags |= nh_flags;
	}

	if (rt6_flags & RTF_EXPIRES) {
		expires = dst ? dst->expires : rt->expires;
		expires -= jiffies;
	}

	if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
		goto nla_put_failure;

	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
		goto nla_put_failure;


	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

5519 5520 5521 5522 5523 5524 5525 5526 5527 5528
/* Callback for nexthop_for_each_fib6_nh(): a nonzero return stops the
 * walk and reports that one of the nexthops uses @arg (a net_device).
 */
static int fib6_info_nh_uses_dev(struct fib6_nh *nh, void *arg)
{
	const struct net_device *dev = arg;

	return nh->fib_nh_dev == dev ? 1 : 0;
}

5529 5530 5531
static bool fib6_info_uses_dev(const struct fib6_info *f6i,
			       const struct net_device *dev)
{
5532 5533 5534 5535 5536 5537 5538 5539
	if (f6i->nh) {
		struct net_device *_dev = (struct net_device *)dev;

		return !!nexthop_for_each_fib6_nh(f6i->nh,
						  fib6_info_nh_uses_dev,
						  _dev);
	}

5540
	if (f6i->fib6_nh->fib_nh_dev == dev)
5541 5542 5543 5544 5545 5546 5547
		return true;

	if (f6i->fib6_nsiblings) {
		struct fib6_info *sibling, *next_sibling;

		list_for_each_entry_safe(sibling, next_sibling,
					 &f6i->fib6_siblings, fib6_siblings) {
5548
			if (sibling->fib6_nh->fib_nh_dev == dev)
5549 5550 5551 5552 5553 5554 5555
				return true;
		}
	}

	return false;
}

5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614
/* State threaded through the per-nexthop exception dump:
 * @dump:  overall dump context (skb, netlink cb, net)
 * @rt:    owning FIB entry, passed to rt6_fill_node()
 * @flags: NLM_F_* flags for each emitted message
 * @skip:  entries already emitted in a previous partial dump
 * @count: entries handled during this walk (consumed by rt6_dump_route)
 */
struct fib6_nh_exception_dump_walker {
	struct rt6_rtnl_dump_arg *dump;
	struct fib6_info *rt;
	unsigned int flags;
	unsigned int skip;
	unsigned int count;
};

/* Dump the route-exception (cached clone) entries hanging off one
 * fib6_nh.  Returns 0 when the bucket is exhausted, or the rt6_fill_node
 * error (skb full) to stop the walk; progress is tracked in w->count.
 */
static int rt6_nh_dump_exceptions(struct fib6_nh *nh, void *arg)
{
	struct fib6_nh_exception_dump_walker *w = arg;
	struct rt6_rtnl_dump_arg *dump = w->dump;
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	int i, err;

	bucket = fib6_nh_get_excptn_bucket(nh, NULL);
	if (!bucket)
		return 0;

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
			if (w->skip) {
				w->skip--;
				continue;
			}

			/* Expiration of entries doesn't bump sernum, insertion
			 * does. Removal is triggered by insertion, so we can
			 * rely on the fact that if entries change between two
			 * partial dumps, this node is scanned again completely,
			 * see rt6_insert_exception() and fib6_dump_table().
			 *
			 * Count expired entries we go through as handled
			 * entries that we'll skip next time, in case of partial
			 * node dump. Otherwise, if entries expire meanwhile,
			 * we'll skip the wrong amount.
			 */
			if (rt6_check_expired(rt6_ex->rt6i)) {
				w->count++;
				continue;
			}

			err = rt6_fill_node(dump->net, dump->skb, w->rt,
					    &rt6_ex->rt6i->dst, NULL, NULL, 0,
					    RTM_NEWROUTE,
					    NETLINK_CB(dump->cb->skb).portid,
					    dump->cb->nlh->nlmsg_seq, w->flags);
			if (err)
				return err;

			w->count++;
		}
		bucket++;
	}

	return 0;
}

5615
/* Return -1 if done with node, number of handled routes on partial dump */
int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip)
{
	struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
	struct fib_dump_filter *filter = &arg->filter;
	unsigned int flags = NLM_F_MULTI;
	struct net *net = arg->net;
	int count = 0;

	/* the null entry is an implementation detail, never dumped */
	if (rt == net->ipv6.fib6_null_entry)
		return -1;

	if ((filter->flags & RTM_F_PREFIX) &&
	    !(rt->fib6_flags & RTF_PREFIX_RT)) {
		/* success since this is not a prefix route */
		return -1;
	}
	/* route rejected by type/device/protocol filter: node done */
	if (filter->filter_set &&
	    ((filter->rt_type  && rt->fib6_type != filter->rt_type) ||
	     (filter->dev      && !fib6_info_uses_dev(rt, filter->dev)) ||
	     (filter->protocol && rt->fib6_protocol != filter->protocol))) {
		return -1;
	}

	if (filter->filter_set ||
	    !filter->dump_routes || !filter->dump_exceptions) {
		flags |= NLM_F_DUMP_FILTERED;
	}

	if (filter->dump_routes) {
		if (skip) {
			skip--;
		} else {
			/* skb full: report 0 handled so this node is
			 * revisited from the same offset next round
			 */
			if (rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL,
					  0, RTM_NEWROUTE,
					  NETLINK_CB(arg->cb->skb).portid,
					  arg->cb->nlh->nlmsg_seq, flags)) {
				return 0;
			}
			count++;
		}
	}

	if (filter->dump_exceptions) {
		struct fib6_nh_exception_dump_walker w = { .dump = arg,
							   .rt = rt,
							   .flags = flags,
							   .skip = skip,
							   .count = 0 };
		int err;

		rcu_read_lock();
		if (rt->nh) {
			err = nexthop_for_each_fib6_nh(rt->nh,
						       rt6_nh_dump_exceptions,
						       &w);
		} else {
			err = rt6_nh_dump_exceptions(rt->fib6_nh, &w);
		}
		rcu_read_unlock();

		/* partial dump: report how many entries were handled */
		if (err)
			return count += w.count;
	}

	return -1;
}

5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697
/* Validate an RTM_GETROUTE request and parse its attributes into @tb.
 *
 * Sockets that opted into strict checking get full validation of the
 * rtmsg header fields and a whitelist of attributes; legacy sockets
 * fall back to a plain (non-strict) parse for compatibility.
 * Returns 0 on success or -EINVAL / the parse error.
 */
static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
					const struct nlmsghdr *nlh,
					struct nlattr **tb,
					struct netlink_ext_ack *extack)
{
	struct rtmsg *rtm;
	int i, err;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid header for get route request");
		return -EINVAL;
	}

	/* legacy requester: accept the message as-is */
	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
					      rtm_ipv6_policy, extack);

	rtm = nlmsg_data(nlh);
	if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
	    rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
	    rtm->rtm_type) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
		return -EINVAL;
	}
	if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid flags for get route request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
					    rtm_ipv6_policy, extack);
	if (err)
		return err;

	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
		NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
		return -EINVAL;
	}

	/* only the lookup-key attributes are meaningful for getroute */
	for (i = 0; i <= RTA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case RTA_SRC:
		case RTA_DST:
		case RTA_IIF:
		case RTA_OIF:
		case RTA_MARK:
		case RTA_UID:
		case RTA_SPORT:
		case RTA_DPORT:
		case RTA_IP_PROTO:
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
			return -EINVAL;
		}
	}

	return 0;
}

5750 5751
/* RTM_GETROUTE handler: build a flow from the request attributes,
 * perform an input or output route lookup, and unicast the resulting
 * route back to the requester.  With RTM_F_FIB_MATCH the matching FIB
 * entry is reported instead of the resolved (cached) route.
 */
static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX+1];
	int err, iif = 0, oif = 0;
	struct fib6_info *from;
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi6 fl6 = {};
	bool fibmatch;

	err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	rtm = nlmsg_data(nlh);
	fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
	fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);

	if (tb[RTA_SRC]) {
		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
			goto errout;

		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
	}

	if (tb[RTA_DST]) {
		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
			goto errout;

		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
	}

	if (tb[RTA_IIF])
		iif = nla_get_u32(tb[RTA_IIF]);

	if (tb[RTA_OIF])
		oif = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_MARK])
		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);

	if (tb[RTA_UID])
		fl6.flowi6_uid = make_kuid(current_user_ns(),
					   nla_get_u32(tb[RTA_UID]));
	else
		fl6.flowi6_uid = iif ? INVALID_UID : current_uid();

	if (tb[RTA_SPORT])
		fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);

	if (tb[RTA_DPORT])
		fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);

	if (tb[RTA_IP_PROTO]) {
		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
						  &fl6.flowi6_proto, AF_INET6,
						  extack);
		if (err)
			goto errout;
	}

	if (iif) {
		/* input-path lookup: resolve as if the packet arrived
		 * on the given interface
		 */
		struct net_device *dev;
		int flags = 0;

		rcu_read_lock();

		dev = dev_get_by_index_rcu(net, iif);
		if (!dev) {
			rcu_read_unlock();
			err = -ENODEV;
			goto errout;
		}

		fl6.flowi6_iif = iif;

		if (!ipv6_addr_any(&fl6.saddr))
			flags |= RT6_LOOKUP_F_HAS_SADDR;

		dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);

		rcu_read_unlock();
	} else {
		fl6.flowi6_oif = oif;

		dst = ip6_route_output(net, NULL, &fl6);
	}


	rt = container_of(dst, struct rt6_info, dst);
	if (rt->dst.error) {
		err = rt->dst.error;
		ip6_rt_put(rt);
		goto errout;
	}

	if (rt == net->ipv6.ip6_null_entry) {
		err = rt->dst.error;
		ip6_rt_put(rt);
		goto errout;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		ip6_rt_put(rt);
		err = -ENOBUFS;
		goto errout;
	}

	/* skb now owns the dst reference taken by the lookup */
	skb_dst_set(skb, &rt->dst);

	rcu_read_lock();
	from = rcu_dereference(rt->from);
	if (from) {
		if (fibmatch)
			err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
					    iif, RTM_NEWROUTE,
					    NETLINK_CB(in_skb).portid,
					    nlh->nlmsg_seq, 0);
		else
			err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
					    &fl6.saddr, iif, RTM_NEWROUTE,
					    NETLINK_CB(in_skb).portid,
					    nlh->nlmsg_seq, 0);
	} else {
		err = -ENETUNREACH;
	}
	rcu_read_unlock();

	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;
}

5894
/* Multicast an RTM_NEWROUTE/RTM_DELROUTE notification for @rt to
 * RTNLGRP_IPV6_ROUTE listeners.  On failure the error is recorded on
 * the group so interested listeners see the loss.
 */
void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
		     unsigned int nlm_flags)
{
	struct sk_buff *skb;
	struct net *net = info->nl_net;
	u32 seq;
	int err;

	err = -ENOBUFS;
	seq = info->nlh ? info->nlh->nlmsg_seq : 0;

	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
	if (!skb)
		goto errout;

	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
			    event, info->portid, seq, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}

5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956
/* Announce an in-place update of @rt: notify in-kernel FIB listeners
 * and multicast an RTM_NEWROUTE with NLM_F_REPLACE to userspace.
 */
void fib6_rt_update(struct net *net, struct fib6_info *rt,
		    struct nl_info *info)
{
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	/* call_fib6_entry_notifiers will be removed when in-kernel notifier
	 * is implemented and supported for nexthop objects
	 */
	call_fib6_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, rt, NULL);

	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
	if (!skb)
		goto errout;

	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
			    RTM_NEWROUTE, info->portid, seq, NLM_F_REPLACE);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}

5957
static int ip6_route_dev_notify(struct notifier_block *this,
5958
				unsigned long event, void *ptr)
5959
{
5960
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5961
	struct net *net = dev_net(dev);
5962

5963 5964 5965 5966
	if (!(dev->flags & IFF_LOOPBACK))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
5967
		net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev;
5968
		net->ipv6.ip6_null_entry->dst.dev = dev;
5969 5970
		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
5971
		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
5972
		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
5973
		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
5974
		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
5975
#endif
5976 5977 5978 5979 5980
	 } else if (event == NETDEV_UNREGISTER &&
		    dev->reg_state != NETREG_UNREGISTERED) {
		/* NETDEV_UNREGISTER could be fired for multiple times by
		 * netdev_wait_allrefs(). Make sure we only call this once.
		 */
5981
		in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
5982
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
5983 5984
		in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
		in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
5985 5986 5987 5988 5989 5990
#endif
	}

	return NOTIFY_OK;
}

L
Linus Torvalds 已提交
5991 5992 5993 5994 5995 5996 5997
/*
 *	/proc
 */

#ifdef CONFIG_PROC_FS
/* /proc/net/rt6_stats: seven hex counters describing the state of the
 * per-netns IPv6 FIB (nodes, route nodes, allocated rt6_infos, route
 * entries, cached routes, live dst entries, discarded routes).
 */
static int rt6_stats_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = (struct net *)seq->private;
	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
		   net->ipv6.rt6_stats->fib_nodes,
		   net->ipv6.rt6_stats->fib_route_nodes,
		   atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
		   net->ipv6.rt6_stats->fib_rt_entries,
		   net->ipv6.rt6_stats->fib_rt_cache,
		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
		   net->ipv6.rt6_stats->fib_discarded_routes);

	return 0;
}
#endif	/* CONFIG_PROC_FS */

#ifdef CONFIG_SYSCTL

/* Handler for the write-only net.ipv6.route.flush sysctl: writing a
 * value triggers a FIB garbage-collection run for the owning netns.
 */
static
int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
			      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net;
	int delay;
	int ret;
	if (!write)
		return -EINVAL;

	net = (struct net *)ctl->extra1;
	/* NOTE(review): delay is sampled BEFORE proc_dointvec stores the
	 * newly written value, so the gc run uses the previous setting —
	 * confirm this ordering is intentional.
	 */
	delay = net->ipv6.sysctl.flush_delay;
	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
	if (ret)
		return ret;

	/* delay <= 0 requests an immediate, non-expiring flush */
	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
	return 0;
}

6034 6035 6036
/* Min/max bounds for the boolean skip_notify_on_dev_down sysctl */
static int zero;
static int one = 1;

6037
/* Template for the per-netns net.ipv6.route.* sysctl table; the .data
 * pointers reference init_net and are rewritten for each namespace in
 * ipv6_route_sysctl_init() (entries are matched there BY INDEX — keep
 * the order in sync with that function).
 */
static struct ctl_table ipv6_route_table_template[] = {
	{
		.procname	=	"flush",
		.data		=	&init_net.ipv6.sysctl.flush_delay,
		.maxlen		=	sizeof(int),
		.mode		=	0200,
		.proc_handler	=	ipv6_sysctl_rtcache_flush
	},
	{
		.procname	=	"gc_thresh",
		.data		=	&ip6_dst_ops_template.gc_thresh,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"max_size",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_max_size,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"gc_min_interval",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"gc_timeout",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_timeout,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"gc_interval",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_interval,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"gc_elasticity",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"mtu_expires",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_mtu_expires,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"min_adv_mss",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_min_advmss,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"gc_min_interval_ms",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_ms_jiffies,
	},
	{
		.procname	=	"skip_notify_on_dev_down",
		.data		=	&init_net.ipv6.sysctl.skip_notify_on_dev_down,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_minmax,
		.extra1		=	&zero,
		.extra2		=	&one,
	},
	{ }
};

6120
/* Clone the route sysctl template for netns @net and point each entry's
 * .data at the namespace-local value.  The indexes below must match the
 * order of ipv6_route_table_template.  Returns NULL on allocation
 * failure (caller treats that as "no sysctls").
 */
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(ipv6_route_table_template,
			sizeof(ipv6_route_table_template),
			GFP_KERNEL);

	if (table) {
		table[0].data = &net->ipv6.sysctl.flush_delay;
		table[0].extra1 = net;	/* flush handler needs the netns */
		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	return table;
}
L
Linus Torvalds 已提交
6149 6150
#endif

6151
/* Per-network-namespace setup for IPv6 routing: clone the dst_ops
 * template, set up dst entry accounting, allocate the special route
 * entries (fib6/ip6 null, and with CONFIG_IPV6_MULTIPLE_TABLES also
 * prohibit and blackhole), and seed the per-netns sysctl defaults.
 *
 * Returns 0 on success or -ENOMEM; uses the classic goto-unwind pattern
 * so each failure point releases exactly what was acquired before it.
 */
static int __net_init ip6_route_net_init(struct net *net)
{
	int ret = -ENOMEM;

	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(net->ipv6.ip6_dst_ops));

	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
		goto out_ip6_dst_ops;

	/* fib6_null_entry is the fallback returned when no route matches */
	net->ipv6.fib6_null_entry = fib6_info_alloc(GFP_KERNEL, true);
	if (!net->ipv6.fib6_null_entry)
		goto out_ip6_dst_entries;
	memcpy(net->ipv6.fib6_null_entry, &fib6_null_entry_template,
	       sizeof(*net->ipv6.fib6_null_entry));

	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
					   sizeof(*net->ipv6.ip6_null_entry),
					   GFP_KERNEL);
	if (!net->ipv6.ip6_null_entry)
		goto out_fib6_null_entry;
	/* Each template copy must point at this netns' dst_ops and carry
	 * its own metrics and uncached-list head.
	 */
	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);
	INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->rt6i_uncached);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.fib6_has_custom_rules = false;
	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
					       sizeof(*net->ipv6.ip6_prohibit_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_prohibit_entry)
		goto out_ip6_null_entry;
	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
			 ip6_template_metrics, true);
	INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->rt6i_uncached);

	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
					       sizeof(*net->ipv6.ip6_blk_hole_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_blk_hole_entry)
		goto out_ip6_prohibit_entry;
	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
			 ip6_template_metrics, true);
	INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->rt6i_uncached);
#endif

	/* Default sysctl values; tunable later via /proc/sys/net/ipv6 */
	net->ipv6.sysctl.flush_delay = 0;
	net->ipv6.sysctl.ip6_rt_max_size = 4096;
	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
	net->ipv6.sysctl.skip_notify_on_dev_down = 0;

	net->ipv6.ip6_rt_gc_expire = 30*HZ;

	ret = 0;
out:
	return ret;

	/* Error unwind: release in reverse order of acquisition */
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
	kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
	kfree(net->ipv6.ip6_null_entry);
#endif
out_fib6_null_entry:
	kfree(net->ipv6.fib6_null_entry);
out_ip6_dst_entries:
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
	goto out;
}

6230
/* Mirror of ip6_route_net_init(): free the special route entries for
 * this namespace, then drop its dst entry accounting.
 */
static void __net_exit ip6_route_net_exit(struct net *net)
{
	kfree(net->ipv6.fib6_null_entry);
	kfree(net->ipv6.ip6_null_entry);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	kfree(net->ipv6.ip6_prohibit_entry);
	kfree(net->ipv6.ip6_blk_hole_entry);
#endif
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}

6241 6242 6243
/* Late per-netns init: create the /proc/net entries that expose the
 * IPv6 routing table ("ipv6_route") and route statistics ("rt6_stats").
 *
 * Returns 0 on success or -ENOMEM.  The original code ignored the
 * return values of proc_create_net()/proc_create_net_single(); on
 * allocation failure it reported success, leaving ip6_route_net_exit_late()
 * to remove proc entries that were never registered.  Check both and
 * unwind the first entry if the second cannot be created.
 */
static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create_net("ipv6_route", 0, net->proc_net,
			     &ipv6_route_seq_ops,
			     sizeof(struct ipv6_route_iter)))
		return -ENOMEM;

	if (!proc_create_net_single("rt6_stats", 0444, net->proc_net,
				    rt6_stats_seq_show, NULL)) {
		remove_proc_entry("ipv6_route", net->proc_net);
		return -ENOMEM;
	}
#endif
	return 0;
}

/* Remove the /proc/net entries created by ip6_route_net_init_late(). */
static void __net_exit ip6_route_net_exit_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ipv6_route", net->proc_net);
	remove_proc_entry("rt6_stats", net->proc_net);
#endif
}

6260 6261 6262 6263 6264
/* Core per-netns routing state; registered via register_pernet_subsys()
 * in ip6_route_init().
 */
static struct pernet_operations ip6_route_net_ops = {
	.init = ip6_route_net_init,
	.exit = ip6_route_net_exit,
};

6265 6266 6267 6268 6269 6270 6271 6272 6273 6274 6275 6276 6277 6278 6279 6280
/* Allocate and initialise this namespace's inet peer base.
 *
 * Returns 0 on success, -ENOMEM if the base cannot be allocated.
 */
static int __net_init ipv6_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp;

	bp = kmalloc(sizeof(*bp), GFP_KERNEL);
	if (!bp)
		return -ENOMEM;

	inet_peer_base_init(bp);
	net->ipv6.peers = bp;

	return 0;
}

/* Release this namespace's inet peer base: detach it from the netns
 * first, then invalidate the tree and free the base itself.
 */
static void __net_exit ipv6_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp;

	bp = net->ipv6.peers;
	net->ipv6.peers = NULL;

	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

6285
/* Per-netns lifetime hooks for the IPv6 inet peer base. */
static struct pernet_operations ipv6_inetpeer_ops = {
	.init	=	ipv6_inetpeer_init,
	.exit	=	ipv6_inetpeer_exit,
};

6290 6291 6292 6293 6294
/* Registered last in ip6_route_init(), after fib6_rules_init(), so the
 * /proc entries only appear once the routing core is fully set up.
 */
static struct pernet_operations ip6_route_net_late_ops = {
	.init = ip6_route_net_init_late,
	.exit = ip6_route_net_exit_late,
};

6295 6296
static struct notifier_block ip6_route_dev_notifier = {
	.notifier_call = ip6_route_dev_notify,
	/* Priority set below addrconf's so this notifier runs after
	 * addrconf has handled the device event.
	 */
	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
};

6300 6301 6302 6303 6304
/* Point init_net's special route entries at the loopback device and take
 * the corresponding inet6_dev references.  Called once at boot, after
 * the loopback device has been registered.
 */
void __init ip6_route_init_special_entries(void)
{
	/* Registering of the loopback is done before this portion of code,
	 * the loopback reference in rt6_info will not be taken, do it
	 * manually for init_net */
	init_net.ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
  #ifdef CONFIG_IPV6_MULTIPLE_TABLES
	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
  #endif
}

6316
/* Module init for IPv6 routing: create the dst slab cache, register the
 * pernet subsystems (inetpeer, core routing, late /proc), hook up the
 * fib6 core, xfrm6 and policy rules, register the RTM_{NEW,DEL,GET}ROUTE
 * rtnetlink handlers and the netdevice notifier, and initialise the
 * per-cpu uncached route lists.
 *
 * Returns 0 on success or a negative errno; the goto ladder unwinds each
 * registration in reverse order on failure.
 */
int __init ip6_route_init(void)
{
	int ret;
	int cpu;

	ret = -ENOMEM;
	ip6_dst_ops_template.kmem_cachep =
		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!ip6_dst_ops_template.kmem_cachep)
		goto out;

	ret = dst_entries_init(&ip6_dst_blackhole_ops);
	if (ret)
		goto out_kmem_cache;

	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
	if (ret)
		goto out_dst_entries;

	ret = register_pernet_subsys(&ip6_route_net_ops);
	if (ret)
		goto out_register_inetpeer;

	/* Blackhole dsts share the same slab as regular rt6_info */
	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

	ret = fib6_init();
	if (ret)
		goto out_register_subsys;

	ret = xfrm6_init();
	if (ret)
		goto out_fib6_init;

	ret = fib6_rules_init();
	if (ret)
		goto xfrm6_init;

	ret = register_pernet_subsys(&ip6_route_net_late_ops);
	if (ret)
		goto fib6_rules_init;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
				   inet6_rtm_newroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
				   inet6_rtm_delroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
				   inet6_rtm_getroute, NULL,
				   RTNL_FLAG_DOIT_UNLOCKED);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
	if (ret)
		goto out_register_late_subsys;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}

out:
	return ret;

	/* Error unwind: undo registrations in reverse order */
out_register_late_subsys:
	rtnl_unregister_all(PF_INET6);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
	fib6_rules_cleanup();
xfrm6_init:
	xfrm6_fini();
out_fib6_init:
	fib6_gc_cleanup();
out_register_subsys:
	unregister_pernet_subsys(&ip6_route_net_ops);
out_register_inetpeer:
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
	dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
	goto out;
}

/* Module teardown: unregister everything ip6_route_init() set up, in
 * reverse order of registration.
 */
void ip6_route_cleanup(void)
{
	unregister_netdevice_notifier(&ip6_route_dev_notifier);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
	fib6_rules_cleanup();
	xfrm6_fini();
	fib6_gc_cleanup();
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
	unregister_pernet_subsys(&ip6_route_net_ops);
	dst_entries_destroy(&ip6_dst_blackhole_ops);
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}