route.c 133.2 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5
/*
 *	Linux INET6 implementation
 *	FIB front-end.
 *
 *	Authors:
6
 *	Pedro Roque		<roque@di.fc.ul.pt>
L
Linus Torvalds 已提交
7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*	Changes:
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		reachable.  otherwise, round-robin the list.
23 24
 *	Ville Nuorvala
 *		Fixed routing subtrees.
L
Linus Torvalds 已提交
25 26
 */

27 28
#define pr_fmt(fmt) "IPv6: " fmt

29
#include <linux/capability.h>
L
Linus Torvalds 已提交
30
#include <linux/errno.h>
31
#include <linux/export.h>
L
Linus Torvalds 已提交
32 33 34 35 36 37 38 39
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
40
#include <linux/mroute6.h>
L
Linus Torvalds 已提交
41 42 43 44
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
45
#include <linux/nsproxy.h>
46
#include <linux/slab.h>
47
#include <linux/jhash.h>
48
#include <net/net_namespace.h>
L
Linus Torvalds 已提交
49 50 51 52 53 54 55 56 57
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
58
#include <net/dst_metadata.h>
L
Linus Torvalds 已提交
59
#include <net/xfrm.h>
60
#include <net/netevent.h>
61
#include <net/netlink.h>
62
#include <net/nexthop.h>
63
#include <net/lwtunnel.h>
64
#include <net/ip_tunnels.h>
D
David Ahern 已提交
65
#include <net/l3mdev.h>
66
#include <net/ip.h>
67
#include <linux/uaccess.h>
L
Linus Torvalds 已提交
68 69 70 71 72

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

73 74 75 76 77 78 79
static int ip6_rt_type_to_error(u8 fib6_type);

#define CREATE_TRACE_POINTS
#include <trace/events/fib6.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
#undef CREATE_TRACE_POINTS

80
enum rt6_nud_state {
J
Jiri Benc 已提交
81 82 83
	RT6_NUD_FAIL_HARD = -3,
	RT6_NUD_FAIL_PROBE = -2,
	RT6_NUD_FAIL_DO_RR = -1,
84 85 86
	RT6_NUD_SUCCEED = 1
};

L
Linus Torvalds 已提交
87
static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
88
static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
89
static unsigned int	 ip6_mtu(const struct dst_entry *dst);
L
Linus Torvalds 已提交
90 91 92 93
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void		ip6_dst_destroy(struct dst_entry *);
static void		ip6_dst_ifdown(struct dst_entry *,
				       struct net_device *dev, int how);
94
static int		 ip6_dst_gc(struct dst_ops *ops);
L
Linus Torvalds 已提交
95 96

static int		ip6_pkt_discard(struct sk_buff *skb);
E
Eric W. Biederman 已提交
97
static int		ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
98
static int		ip6_pkt_prohibit(struct sk_buff *skb);
E
Eric W. Biederman 已提交
99
static int		ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
L
Linus Torvalds 已提交
100
static void		ip6_link_failure(struct sk_buff *skb);
101 102 103 104
static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
105 106
static int rt6_score_route(struct fib6_info *rt, int oif, int strict);
static size_t rt6_nlmsg_size(struct fib6_info *rt);
107
static int rt6_fill_node(struct net *net, struct sk_buff *skb,
108
			 struct fib6_info *rt, struct dst_entry *dst,
109
			 struct in6_addr *dest, struct in6_addr *src,
110 111
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags);
112
static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt,
113 114
					   struct in6_addr *daddr,
					   struct in6_addr *saddr);
L
Linus Torvalds 已提交
115

116
#ifdef CONFIG_IPV6_ROUTE_INFO
117
static struct fib6_info *rt6_add_route_info(struct net *net,
118
					   const struct in6_addr *prefix, int prefixlen,
119 120
					   const struct in6_addr *gwaddr,
					   struct net_device *dev,
121
					   unsigned int pref);
122
static struct fib6_info *rt6_get_route_info(struct net *net,
123
					   const struct in6_addr *prefix, int prefixlen,
124 125
					   const struct in6_addr *gwaddr,
					   struct net_device *dev);
126 127
#endif

128 129 130 131 132 133 134
struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);

135
void rt6_uncached_list_add(struct rt6_info *rt)
136 137 138 139 140 141 142 143 144 145
{
	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

	rt->rt6i_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt6i_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

146
void rt6_uncached_list_del(struct rt6_info *rt)
147 148 149
{
	if (!list_empty(&rt->rt6i_uncached)) {
		struct uncached_list *ul = rt->rt6i_uncached_list;
W
Wei Wang 已提交
150
		struct net *net = dev_net(rt->dst.dev);
151 152 153

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt6i_uncached);
W
Wei Wang 已提交
154
		atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
155 156 157 158 159 160 161 162 163
		spin_unlock_bh(&ul->lock);
	}
}

static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
{
	struct net_device *loopback_dev = net->loopback_dev;
	int cpu;

164 165 166
	if (dev == loopback_dev)
		return;

167 168 169 170 171 172 173 174 175
	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
		struct rt6_info *rt;

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
			struct inet6_dev *rt_idev = rt->rt6i_idev;
			struct net_device *rt_dev = rt->dst.dev;

176
			if (rt_idev->dev == dev) {
177 178 179 180
				rt->rt6i_idev = in6_dev_get(loopback_dev);
				in6_dev_put(rt_idev);
			}

181
			if (rt_dev == dev) {
182 183 184 185 186 187 188 189 190
				rt->dst.dev = loopback_dev;
				dev_hold(rt->dst.dev);
				dev_put(rt_dev);
			}
		}
		spin_unlock_bh(&ul->lock);
	}
}

191
static inline const void *choose_neigh_daddr(const struct in6_addr *p,
192 193
					     struct sk_buff *skb,
					     const void *daddr)
194
{
D
David S. Miller 已提交
195
	if (!ipv6_addr_any(p))
196
		return (const void *) p;
197 198
	else if (skb)
		return &ipv6_hdr(skb)->daddr;
199 200 201
	return daddr;
}

202 203 204 205
struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
				   struct net_device *dev,
				   struct sk_buff *skb,
				   const void *daddr)
206
{
207 208
	struct neighbour *n;

209 210
	daddr = choose_neigh_daddr(gw, skb, daddr);
	n = __ipv6_neigh_lookup(dev, daddr);
211 212
	if (n)
		return n;
213 214 215

	n = neigh_create(&nd_tbl, daddr, dev);
	return IS_ERR(n) ? NULL : n;
216 217 218 219 220 221 222 223 224
}

/* dst_ops::neigh_lookup hook — key the lookup on the route's gateway. */
static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
					      struct sk_buff *skb,
					      const void *daddr)
{
	const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);

	return ip6_neigh_lookup(&rt->rt6i_gateway, dst->dev, skb, daddr);
}

227 228 229 230 231
static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	struct net_device *dev = dst->dev;
	struct rt6_info *rt = (struct rt6_info *)dst;

232
	daddr = choose_neigh_daddr(&rt->rt6i_gateway, NULL, daddr);
233 234 235 236 237 238 239 240 241
	if (!daddr)
		return;
	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
		return;
	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
		return;
	__ipv6_confirm_neigh(dev, daddr);
}

242
static struct dst_ops ip6_dst_ops_template = {
L
Linus Torvalds 已提交
243 244 245 246
	.family			=	AF_INET6,
	.gc			=	ip6_dst_gc,
	.gc_thresh		=	1024,
	.check			=	ip6_dst_check,
247
	.default_advmss		=	ip6_default_advmss,
248
	.mtu			=	ip6_mtu,
249
	.cow_metrics		=	dst_cow_metrics_generic,
L
Linus Torvalds 已提交
250 251 252 253 254
	.destroy		=	ip6_dst_destroy,
	.ifdown			=	ip6_dst_ifdown,
	.negative_advice	=	ip6_negative_advice,
	.link_failure		=	ip6_link_failure,
	.update_pmtu		=	ip6_rt_update_pmtu,
255
	.redirect		=	rt6_do_redirect,
256
	.local_out		=	__ip6_local_out,
257
	.neigh_lookup		=	ip6_dst_neigh_lookup,
258
	.confirm_neigh		=	ip6_confirm_neigh,
L
Linus Torvalds 已提交
259 260
};

261
static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
262
{
263 264 265
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
266 267
}

268 269
/* Blackhole dsts deliberately ignore PMTU updates. */
static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					 struct sk_buff *skb, u32 mtu)
{
}

273 274
/* Blackhole dsts deliberately ignore redirects. */
static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				      struct sk_buff *skb)
{
}

278 279 280 281
static struct dst_ops ip6_dst_blackhole_ops = {
	.family			=	AF_INET6,
	.destroy		=	ip6_dst_destroy,
	.check			=	ip6_dst_check,
282
	.mtu			=	ip6_blackhole_mtu,
283
	.default_advmss		=	ip6_default_advmss,
284
	.update_pmtu		=	ip6_rt_blackhole_update_pmtu,
285
	.redirect		=	ip6_rt_blackhole_redirect,
286
	.cow_metrics		=	dst_cow_metrics_generic,
287
	.neigh_lookup		=	ip6_dst_neigh_lookup,
288 289
};

290
static const u32 ip6_template_metrics[RTAX_MAX] = {
L
Li RongQing 已提交
291
	[RTAX_HOPLIMIT - 1] = 0,
292 293
};

294
static const struct fib6_info fib6_null_entry_template = {
295 296 297 298
	.fib6_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.fib6_protocol  = RTPROT_KERNEL,
	.fib6_metric	= ~(u32)0,
	.fib6_ref	= ATOMIC_INIT(1),
D
David Ahern 已提交
299 300 301 302
	.fib6_type	= RTN_UNREACHABLE,
	.fib6_metrics	= (struct dst_metrics *)&dst_default_metrics,
};

303
static const struct rt6_info ip6_null_entry_template = {
304 305 306
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
307
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
308 309 310
		.error		= -ENETUNREACH,
		.input		= ip6_pkt_discard,
		.output		= ip6_pkt_discard_out,
L
Linus Torvalds 已提交
311 312 313 314
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

T
Thomas Graf 已提交
315 316
#ifdef CONFIG_IPV6_MULTIPLE_TABLES

317
static const struct rt6_info ip6_prohibit_entry_template = {
318 319 320
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
321
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
322 323 324
		.error		= -EACCES,
		.input		= ip6_pkt_prohibit,
		.output		= ip6_pkt_prohibit_out,
T
Thomas Graf 已提交
325 326 327 328
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

329
static const struct rt6_info ip6_blk_hole_entry_template = {
330 331 332
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
333
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
334 335
		.error		= -EINVAL,
		.input		= dst_discard,
E
Eric W. Biederman 已提交
336
		.output		= dst_discard_out,
T
Thomas Graf 已提交
337 338 339 340 341 342
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

#endif

343 344 345 346 347 348 349 350
/* Zero everything past the embedded dst_entry and init list linkage. */
static void rt6_info_init(struct rt6_info *rt)
{
	struct dst_entry *dst = &rt->dst;

	memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
	INIT_LIST_HEAD(&rt->rt6i_uncached);
}

L
Linus Torvalds 已提交
351
/* allocate dst with ip6_dst_ops */
352 353
struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
			       int flags)
L
Linus Torvalds 已提交
354
{
355
	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
W
Wei Wang 已提交
356
					1, DST_OBSOLETE_FORCE_CHK, flags);
357

W
Wei Wang 已提交
358
	if (rt) {
359
		rt6_info_init(rt);
W
Wei Wang 已提交
360 361
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
	}
362

363
	return rt;
L
Linus Torvalds 已提交
364
}
365
EXPORT_SYMBOL(ip6_dst_alloc);
M
Martin KaFai Lau 已提交
366

L
Linus Torvalds 已提交
367 368
static void ip6_dst_destroy(struct dst_entry *dst)
{
369
	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
L
Linus Torvalds 已提交
370
	struct rt6_info *rt = (struct rt6_info *)dst;
371
	struct fib6_info *from;
372
	struct inet6_dev *idev;
L
Linus Torvalds 已提交
373

374 375 376
	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
		kfree(p);

377 378 379
	rt6_uncached_list_del(rt);

	idev = rt->rt6i_idev;
380
	if (idev) {
L
Linus Torvalds 已提交
381 382
		rt->rt6i_idev = NULL;
		in6_dev_put(idev);
383
	}
384

385 386 387
	rcu_read_lock();
	from = rcu_dereference(rt->from);
	rcu_assign_pointer(rt->from, NULL);
388
	fib6_info_release(from);
389
	rcu_read_unlock();
390 391
}

L
Linus Torvalds 已提交
392 393 394 395 396
static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			   int how)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct inet6_dev *idev = rt->rt6i_idev;
397
	struct net_device *loopback_dev =
398
		dev_net(dev)->loopback_dev;
L
Linus Torvalds 已提交
399

400 401 402 403 404
	if (idev && idev->dev != loopback_dev) {
		struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
		if (loopback_idev) {
			rt->rt6i_idev = loopback_idev;
			in6_dev_put(idev);
405
		}
L
Linus Torvalds 已提交
406 407 408
	}
}

409 410 411 412 413 414 415 416
/* True when the dst itself carries an expiry time that has passed. */
static bool __rt6_check_expired(const struct rt6_info *rt)
{
	return (rt->rt6i_flags & RTF_EXPIRES) &&
	       time_after(jiffies, rt->dst.expires);
}

417
static bool rt6_check_expired(const struct rt6_info *rt)
L
Linus Torvalds 已提交
418
{
419 420 421 422
	struct fib6_info *from;

	from = rcu_dereference(rt->from);

423 424
	if (rt->rt6i_flags & RTF_EXPIRES) {
		if (time_after(jiffies, rt->dst.expires))
425
			return true;
426
	} else if (from) {
427
		return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
428
			fib6_check_expired(from);
429
	}
430
	return false;
L
Linus Torvalds 已提交
431 432
}

433 434 435 436 437
struct fib6_info *fib6_multipath_select(const struct net *net,
					struct fib6_info *match,
					struct flowi6 *fl6, int oif,
					const struct sk_buff *skb,
					int strict)
438
{
439
	struct fib6_info *sibling, *next_sibling;
440

441 442 443 444
	/* We might have already computed the hash for ICMPv6 errors. In such
	 * case it will always be non-zero. Otherwise now is the time to do it.
	 */
	if (!fl6->mp_hash)
445
		fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);
446

447
	if (fl6->mp_hash <= atomic_read(&match->fib6_nh.nh_upper_bound))
448 449
		return match;

450 451
	list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
				 fib6_siblings) {
452 453 454 455
		int nh_upper_bound;

		nh_upper_bound = atomic_read(&sibling->fib6_nh.nh_upper_bound);
		if (fl6->mp_hash > nh_upper_bound)
456 457 458 459 460 461 462
			continue;
		if (rt6_score_route(sibling, oif, strict) < 0)
			break;
		match = sibling;
		break;
	}

463 464 465
	return match;
}

L
Linus Torvalds 已提交
466
/*
467
 *	Route lookup. rcu_read_lock() should be held.
L
Linus Torvalds 已提交
468 469
 */

470 471
static inline struct fib6_info *rt6_device_match(struct net *net,
						 struct fib6_info *rt,
472
						    const struct in6_addr *saddr,
L
Linus Torvalds 已提交
473
						    int oif,
474
						    int flags)
L
Linus Torvalds 已提交
475
{
476
	struct fib6_info *sprt;
L
Linus Torvalds 已提交
477

478 479
	if (!oif && ipv6_addr_any(saddr) &&
	    !(rt->fib6_nh.nh_flags & RTNH_F_DEAD))
480
		return rt;
481

482
	for (sprt = rt; sprt; sprt = rcu_dereference(sprt->fib6_next)) {
483
		const struct net_device *dev = sprt->fib6_nh.nh_dev;
484

485
		if (sprt->fib6_nh.nh_flags & RTNH_F_DEAD)
486 487
			continue;

488
		if (oif) {
L
Linus Torvalds 已提交
489 490
			if (dev->ifindex == oif)
				return sprt;
491 492 493 494
		} else {
			if (ipv6_chk_addr(net, saddr, dev,
					  flags & RT6_LOOKUP_F_IFACE))
				return sprt;
L
Linus Torvalds 已提交
495
		}
496
	}
L
Linus Torvalds 已提交
497

498 499
	if (oif && flags & RT6_LOOKUP_F_IFACE)
		return net->ipv6.fib6_null_entry;
500

D
David Ahern 已提交
501
	return rt->fib6_nh.nh_flags & RTNH_F_DEAD ? net->ipv6.fib6_null_entry : rt;
L
Linus Torvalds 已提交
502 503
}

504
#ifdef CONFIG_IPV6_ROUTER_PREF
505 506 507 508 509 510 511 512 513 514 515 516 517
struct __rt6_probe_work {
	struct work_struct work;
	struct in6_addr target;
	struct net_device *dev;
};

static void rt6_probe_deferred(struct work_struct *w)
{
	struct in6_addr mcaddr;
	struct __rt6_probe_work *work =
		container_of(w, struct __rt6_probe_work, work);

	addrconf_addr_solict_mult(&work->target, &mcaddr);
518
	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
519
	dev_put(work->dev);
520
	kfree(work);
521 522
}

523
static void rt6_probe(struct fib6_info *rt)
524
{
525
	struct __rt6_probe_work *work = NULL;
526
	const struct in6_addr *nh_gw;
527
	struct neighbour *neigh;
528
	struct net_device *dev;
529
	struct inet6_dev *idev;
530

531 532 533 534 535 536 537 538
	/*
	 * Okay, this does not seem to be appropriate
	 * for now, however, we need to check if it
	 * is really so; aka Router Reachability Probing.
	 *
	 * Router Reachability Probe MUST be rate-limited
	 * to no more than one per minute.
	 */
539
	if (!rt || !(rt->fib6_flags & RTF_GATEWAY))
540
		return;
541 542 543

	nh_gw = &rt->fib6_nh.nh_gw;
	dev = rt->fib6_nh.nh_dev;
544
	rcu_read_lock_bh();
545
	idev = __in6_dev_get(dev);
546
	neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
547
	if (neigh) {
548 549 550
		if (neigh->nud_state & NUD_VALID)
			goto out;

551
		write_lock(&neigh->lock);
552 553
		if (!(neigh->nud_state & NUD_VALID) &&
		    time_after(jiffies,
D
David Ahern 已提交
554
			       neigh->updated + idev->cnf.rtr_probe_interval)) {
555 556 557
			work = kmalloc(sizeof(*work), GFP_ATOMIC);
			if (work)
				__neigh_set_probe_once(neigh);
558
		}
559
		write_unlock(&neigh->lock);
560 561
	} else if (time_after(jiffies, rt->last_probe +
				       idev->cnf.rtr_probe_interval)) {
562
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
563
	}
564 565

	if (work) {
566
		rt->last_probe = jiffies;
567
		INIT_WORK(&work->work, rt6_probe_deferred);
568 569 570
		work->target = *nh_gw;
		dev_hold(dev);
		work->dev = dev;
571 572 573
		schedule_work(&work->work);
	}

574
out:
575
	rcu_read_unlock_bh();
576 577
}
#else
578
/* CONFIG_IPV6_ROUTER_PREF disabled: reachability probing is a no-op. */
static inline void rt6_probe(struct fib6_info *rt)
{
}
#endif

L
Linus Torvalds 已提交
583
/*
584
 * Default Router Selection (RFC 2461 6.3.6)
L
Linus Torvalds 已提交
585
 */
586
static inline int rt6_check_dev(struct fib6_info *rt, int oif)
587
{
588 589
	const struct net_device *dev = rt->fib6_nh.nh_dev;

590
	if (!oif || dev->ifindex == oif)
591
		return 2;
592
	return 0;
593
}
L
Linus Torvalds 已提交
594

595
static inline enum rt6_nud_state rt6_check_neigh(struct fib6_info *rt)
L
Linus Torvalds 已提交
596
{
597
	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
598
	struct neighbour *neigh;
599

600 601
	if (rt->fib6_flags & RTF_NONEXTHOP ||
	    !(rt->fib6_flags & RTF_GATEWAY))
602
		return RT6_NUD_SUCCEED;
603 604

	rcu_read_lock_bh();
605 606
	neigh = __ipv6_neigh_lookup_noref(rt->fib6_nh.nh_dev,
					  &rt->fib6_nh.nh_gw);
607 608
	if (neigh) {
		read_lock(&neigh->lock);
609
		if (neigh->nud_state & NUD_VALID)
610
			ret = RT6_NUD_SUCCEED;
611
#ifdef CONFIG_IPV6_ROUTER_PREF
612
		else if (!(neigh->nud_state & NUD_FAILED))
613
			ret = RT6_NUD_SUCCEED;
J
Jiri Benc 已提交
614 615
		else
			ret = RT6_NUD_FAIL_PROBE;
616
#endif
617
		read_unlock(&neigh->lock);
618 619
	} else {
		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
J
Jiri Benc 已提交
620
		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
621
	}
622 623
	rcu_read_unlock_bh();

624
	return ret;
L
Linus Torvalds 已提交
625 626
}

627
static int rt6_score_route(struct fib6_info *rt, int oif, int strict)
L
Linus Torvalds 已提交
628
{
629
	int m;
630

631
	m = rt6_check_dev(rt, oif);
632
	if (!m && (strict & RT6_LOOKUP_F_IFACE))
633
		return RT6_NUD_FAIL_HARD;
634
#ifdef CONFIG_IPV6_ROUTER_PREF
635
	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->fib6_flags)) << 2;
636
#endif
637 638 639 640 641
	if (strict & RT6_LOOKUP_F_REACHABLE) {
		int n = rt6_check_neigh(rt);
		if (n < 0)
			return n;
	}
642 643 644
	return m;
}

D
David Ahern 已提交
645 646 647 648 649 650 651 652 653 654 655 656 657 658 659
/* called with rcu_read_lock held */
static inline bool fib6_ignore_linkdown(const struct fib6_info *f6i)
{
	const struct net_device *dev = fib6_info_nh_dev(f6i);
	bool rc = false;

	if (dev) {
		const struct inet6_dev *idev = __in6_dev_get(dev);

		/* __in6_dev_get() can return NULL while the device is
		 * tearing down its IPv6 state; treat that as "do not
		 * ignore" instead of dereferencing a NULL pointer.
		 */
		if (idev)
			rc = !!idev->cnf.ignore_routes_with_linkdown;
	}

	return rc;
}

660 661
static struct fib6_info *find_match(struct fib6_info *rt, int oif, int strict,
				   int *mpri, struct fib6_info *match,
662
				   bool *do_rr)
663
{
664
	int m;
665
	bool match_do_rr = false;
666

667
	if (rt->fib6_nh.nh_flags & RTNH_F_DEAD)
668 669
		goto out;

D
David Ahern 已提交
670
	if (fib6_ignore_linkdown(rt) &&
671
	    rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN &&
672
	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
673
		goto out;
674

675
	if (fib6_check_expired(rt))
676 677 678
		goto out;

	m = rt6_score_route(rt, oif, strict);
J
Jiri Benc 已提交
679
	if (m == RT6_NUD_FAIL_DO_RR) {
680 681
		match_do_rr = true;
		m = 0; /* lowest valid score */
J
Jiri Benc 已提交
682
	} else if (m == RT6_NUD_FAIL_HARD) {
683
		goto out;
684 685 686 687
	}

	if (strict & RT6_LOOKUP_F_REACHABLE)
		rt6_probe(rt);
688

J
Jiri Benc 已提交
689
	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
690
	if (m > *mpri) {
691
		*do_rr = match_do_rr;
692 693 694 695 696 697 698
		*mpri = m;
		match = rt;
	}
out:
	return match;
}

699 700 701
static struct fib6_info *find_rr_leaf(struct fib6_node *fn,
				     struct fib6_info *leaf,
				     struct fib6_info *rr_head,
702 703
				     u32 metric, int oif, int strict,
				     bool *do_rr)
704
{
705
	struct fib6_info *rt, *match, *cont;
706
	int mpri = -1;
L
Linus Torvalds 已提交
707

708
	match = NULL;
709
	cont = NULL;
710
	for (rt = rr_head; rt; rt = rcu_dereference(rt->fib6_next)) {
711
		if (rt->fib6_metric != metric) {
712 713 714 715 716 717 718
			cont = rt;
			break;
		}

		match = find_match(rt, oif, strict, &mpri, match, do_rr);
	}

719
	for (rt = leaf; rt && rt != rr_head;
720
	     rt = rcu_dereference(rt->fib6_next)) {
721
		if (rt->fib6_metric != metric) {
722 723 724 725
			cont = rt;
			break;
		}

726
		match = find_match(rt, oif, strict, &mpri, match, do_rr);
727 728 729 730 731
	}

	if (match || !cont)
		return match;

732
	for (rt = cont; rt; rt = rcu_dereference(rt->fib6_next))
733
		match = find_match(rt, oif, strict, &mpri, match, do_rr);
L
Linus Torvalds 已提交
734

735 736
	return match;
}
L
Linus Torvalds 已提交
737

738
static struct fib6_info *rt6_select(struct net *net, struct fib6_node *fn,
W
Wei Wang 已提交
739
				   int oif, int strict)
740
{
741 742
	struct fib6_info *leaf = rcu_dereference(fn->leaf);
	struct fib6_info *match, *rt0;
743
	bool do_rr = false;
744
	int key_plen;
L
Linus Torvalds 已提交
745

D
David Ahern 已提交
746 747
	if (!leaf || leaf == net->ipv6.fib6_null_entry)
		return net->ipv6.fib6_null_entry;
W
Wei Wang 已提交
748

749
	rt0 = rcu_dereference(fn->rr_ptr);
750
	if (!rt0)
751
		rt0 = leaf;
L
Linus Torvalds 已提交
752

753 754 755 756 757
	/* Double check to make sure fn is not an intermediate node
	 * and fn->leaf does not points to its child's leaf
	 * (This might happen if all routes under fn are deleted from
	 * the tree and fib6_repair_tree() is called on the node.)
	 */
758
	key_plen = rt0->fib6_dst.plen;
759
#ifdef CONFIG_IPV6_SUBTREES
760 761
	if (rt0->fib6_src.plen)
		key_plen = rt0->fib6_src.plen;
762 763
#endif
	if (fn->fn_bit != key_plen)
D
David Ahern 已提交
764
		return net->ipv6.fib6_null_entry;
765

766
	match = find_rr_leaf(fn, leaf, rt0, rt0->fib6_metric, oif, strict,
767
			     &do_rr);
L
Linus Torvalds 已提交
768

769
	if (do_rr) {
770
		struct fib6_info *next = rcu_dereference(rt0->fib6_next);
771

772
		/* no entries matched; do round-robin */
773
		if (!next || next->fib6_metric != rt0->fib6_metric)
W
Wei Wang 已提交
774
			next = leaf;
775

776
		if (next != rt0) {
777
			spin_lock_bh(&leaf->fib6_table->tb6_lock);
778
			/* make sure next is not being deleted from the tree */
779
			if (next->fib6_node)
780
				rcu_assign_pointer(fn->rr_ptr, next);
781
			spin_unlock_bh(&leaf->fib6_table->tb6_lock);
782
		}
L
Linus Torvalds 已提交
783 784
	}

D
David Ahern 已提交
785
	return match ? match : net->ipv6.fib6_null_entry;
L
Linus Torvalds 已提交
786 787
}

788
static bool rt6_is_gw_or_nonexthop(const struct fib6_info *rt)
789
{
790
	return (rt->fib6_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
791 792
}

793 794
#ifdef CONFIG_IPV6_ROUTE_INFO
/* Process a Route Information option from a Router Advertisement
 * (RFC 4191): validate it, then add, refresh, or withdraw the
 * corresponding route.
 */
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
		  const struct in6_addr *gwaddr)
{
	struct net *net = dev_net(dev);
	struct route_info *rinfo = (struct route_info *) opt;
	struct in6_addr prefix_buf, *prefix;
	unsigned int pref;
	unsigned long lifetime;
	struct fib6_info *rt;

	if (len < sizeof(struct route_info))
		return -EINVAL;

	/* Sanity check for prefix_len and length */
	if (rinfo->length > 3)
		return -EINVAL;
	else if (rinfo->prefix_len > 128)
		return -EINVAL;
	else if (rinfo->prefix_len > 64) {
		if (rinfo->length < 2)
			return -EINVAL;
	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1)
			return -EINVAL;
	}

	pref = rinfo->route_pref;
	if (pref == ICMPV6_ROUTER_PREF_INVALID)
		return -EINVAL;

	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

	if (rinfo->length == 3) {
		prefix = (struct in6_addr *)rinfo->prefix;
	} else {
		/* this function is safe */
		ipv6_addr_prefix(&prefix_buf,
				 (struct in6_addr *)rinfo->prefix,
				 rinfo->prefix_len);
		prefix = &prefix_buf;
	}

	if (rinfo->prefix_len == 0)
		rt = rt6_get_dflt_router(net, gwaddr, dev);
	else
		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
					gwaddr, dev);

	/* a zero lifetime withdraws any existing route */
	if (rt && !lifetime) {
		ip6_del_rt(net, rt);
		rt = NULL;
	}

	if (!rt && lifetime)
		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
					dev, pref);
	else if (rt)
		rt->fib6_flags = RTF_ROUTEINFO |
				 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

	if (rt) {
		if (!addrconf_finite_timeout(lifetime))
			fib6_clean_expires(rt);
		else
			fib6_set_expires(rt, jiffies + HZ * lifetime);

		fib6_info_release(rt);
	}
	return 0;
}
#endif

869 870 871 872 873
/*
 *	Misc support functions
 */

/* called with rcu_lock held */
874
static struct net_device *ip6_rt_get_dev_rcu(struct fib6_info *rt)
875
{
876
	struct net_device *dev = rt->fib6_nh.nh_dev;
877

878
	if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
879 880 881 882 883
		/* for copies of local routes, dst->dev needs to be the
		 * device if it is a master device, the master device if
		 * device is enslaved, and the loopback as the default
		 */
		if (netif_is_l3_slave(dev) &&
884
		    !rt6_need_strict(&rt->fib6_dst.addr))
885 886 887 888 889 890 891 892 893 894 895
			dev = l3mdev_master_dev_rcu(dev);
		else if (!netif_is_l3_master(dev))
			dev = dev_net(dev)->loopback_dev;
		/* last case is netif_is_l3_master(dev) is true in which
		 * case we want dev returned to be dev
		 */
	}

	return dev;
}

896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915
/* Map an RTN_* route type to the dst error code reported to callers. */
static const int fib6_prop[RTN_MAX + 1] = {
	[RTN_UNSPEC]	= 0,
	[RTN_UNICAST]	= 0,
	[RTN_LOCAL]	= 0,
	[RTN_BROADCAST]	= 0,
	[RTN_ANYCAST]	= 0,
	[RTN_MULTICAST]	= 0,
	[RTN_BLACKHOLE]	= -EINVAL,
	[RTN_UNREACHABLE] = -EHOSTUNREACH,
	[RTN_PROHIBIT]	= -EACCES,
	[RTN_THROW]	= -EAGAIN,
	[RTN_NAT]	= -EINVAL,
	[RTN_XRESOLVE]	= -EINVAL,
};

static int ip6_rt_type_to_error(u8 fib6_type)
{
	return fib6_prop[fib6_type];
}

916
static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
917 918 919 920 921 922 923 924 925 926 927 928 929
{
	unsigned short flags = 0;

	if (rt->dst_nocount)
		flags |= DST_NOCOUNT;
	if (rt->dst_nopolicy)
		flags |= DST_NOPOLICY;
	if (rt->dst_host)
		flags |= DST_HOST;

	return flags;
}

930
static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct fib6_info *ort)
931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951
{
	rt->dst.error = ip6_rt_type_to_error(ort->fib6_type);

	switch (ort->fib6_type) {
	case RTN_BLACKHOLE:
		rt->dst.output = dst_discard_out;
		rt->dst.input = dst_discard;
		break;
	case RTN_PROHIBIT:
		rt->dst.output = ip6_pkt_prohibit_out;
		rt->dst.input = ip6_pkt_prohibit;
		break;
	case RTN_THROW:
	case RTN_UNREACHABLE:
	default:
		rt->dst.output = ip6_pkt_discard_out;
		rt->dst.input = ip6_pkt_discard;
		break;
	}
}

952
static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
953
{
954
	if (ort->fib6_flags & RTF_REJECT) {
955 956 957 958 959 960 961
		ip6_rt_init_dst_reject(rt, ort);
		return;
	}

	rt->dst.error = 0;
	rt->dst.output = ip6_output;

962
	if (ort->fib6_type == RTN_LOCAL || ort->fib6_type == RTN_ANYCAST) {
963
		rt->dst.input = ip6_input;
964
	} else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
965 966 967 968 969 970 971 972 973 974 975 976 977
		rt->dst.input = ip6_mc_input;
	} else {
		rt->dst.input = ip6_forward;
	}

	if (ort->fib6_nh.nh_lwtstate) {
		rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate);
		lwtunnel_set_redirect(&rt->dst);
	}

	rt->dst.lastuse = jiffies;
}

978
/* Caller must already hold reference to @from */
979
static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
980 981
{
	rt->rt6i_flags &= ~RTF_EXPIRES;
982
	rcu_assign_pointer(rt->from, from);
983
	dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
984 985 986 987
	if (from->fib6_metrics != &dst_default_metrics) {
		rt->dst._metrics |= DST_METRICS_REFCOUNTED;
		refcount_inc(&from->fib6_metrics->refcnt);
	}
988 989
}

990
/* Caller must already hold reference to @ort */
991
static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
992
{
D
David Ahern 已提交
993 994
	struct net_device *dev = fib6_info_nh_dev(ort);

995 996
	ip6_rt_init_dst(rt, ort);

997
	rt->rt6i_dst = ort->fib6_dst;
D
David Ahern 已提交
998
	rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
999
	rt->rt6i_gateway = ort->fib6_nh.nh_gw;
1000
	rt->rt6i_flags = ort->fib6_flags;
1001 1002
	rt6_set_from(rt, ort);
#ifdef CONFIG_IPV6_SUBTREES
1003
	rt->rt6i_src = ort->fib6_src;
1004
#endif
1005
	rt->rt6i_prefsrc = ort->fib6_prefsrc;
1006 1007
}

M
Martin KaFai Lau 已提交
1008 1009 1010
static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
					struct in6_addr *saddr)
{
1011
	struct fib6_node *pn, *sn;
M
Martin KaFai Lau 已提交
1012 1013 1014
	while (1) {
		if (fn->fn_flags & RTN_TL_ROOT)
			return NULL;
1015 1016 1017
		pn = rcu_dereference(fn->parent);
		sn = FIB6_SUBTREE(pn);
		if (sn && sn != fn)
1018
			fn = fib6_node_lookup(sn, NULL, saddr);
M
Martin KaFai Lau 已提交
1019 1020 1021 1022 1023 1024
		else
			fn = pn;
		if (fn->fn_flags & RTN_RTINFO)
			return fn;
	}
}
T
Thomas Graf 已提交
1025

1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042
/* Try to take a reference on *prt; on failure either substitute the
 * (referenced) null route or clear the pointer, per @null_fallback.
 * Returns true when the original route's reference was obtained.
 */
static bool ip6_hold_safe(struct net *net, struct rt6_info **prt,
			  bool null_fallback)
{
	struct rt6_info *rt = *prt;

	if (dst_hold_safe(&rt->dst))
		return true;

	if (null_fallback) {
		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
	} else {
		rt = NULL;
	}
	*prt = rt;
	return false;
}

1043
/* called with rcu_lock held */
1044
static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
1045
{
1046
	unsigned short flags = fib6_info_dst_flags(rt);
1047 1048 1049
	struct net_device *dev = rt->fib6_nh.nh_dev;
	struct rt6_info *nrt;

1050 1051 1052
	if (!fib6_info_hold_safe(rt))
		return NULL;

1053
	nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
1054 1055
	if (nrt)
		ip6_rt_copy_init(nrt, rt);
1056 1057
	else
		fib6_info_release(rt);
1058 1059 1060 1061

	return nrt;
}

1062 1063
static struct rt6_info *ip6_pol_route_lookup(struct net *net,
					     struct fib6_table *table,
D
David Ahern 已提交
1064 1065 1066
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
L
Linus Torvalds 已提交
1067
{
1068
	struct fib6_info *f6i;
L
Linus Torvalds 已提交
1069
	struct fib6_node *fn;
1070
	struct rt6_info *rt;
L
Linus Torvalds 已提交
1071

1072 1073 1074
	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		flags &= ~RT6_LOOKUP_F_IFACE;

1075
	rcu_read_lock();
1076
	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
T
Thomas Graf 已提交
1077
restart:
1078 1079 1080
	f6i = rcu_dereference(fn->leaf);
	if (!f6i) {
		f6i = net->ipv6.fib6_null_entry;
1081
	} else {
1082
		f6i = rt6_device_match(net, f6i, &fl6->saddr,
1083
				      fl6->flowi6_oif, flags);
1084
		if (f6i->fib6_nsiblings && fl6->flowi6_oif == 0)
1085 1086 1087
			f6i = fib6_multipath_select(net, f6i, fl6,
						    fl6->flowi6_oif, skb,
						    flags);
1088
	}
1089
	if (f6i == net->ipv6.fib6_null_entry) {
M
Martin KaFai Lau 已提交
1090 1091 1092 1093
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}
1094

1095
	trace_fib6_table_lookup(net, f6i, table, fl6);
1096

1097
	/* Search through exception table */
1098 1099
	rt = rt6_find_cached_rt(f6i, &fl6->daddr, &fl6->saddr);
	if (rt) {
1100 1101
		if (ip6_hold_safe(net, &rt, true))
			dst_use_noref(&rt->dst, jiffies);
1102
	} else if (f6i == net->ipv6.fib6_null_entry) {
1103 1104
		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
1105 1106 1107 1108 1109 1110
	} else {
		rt = ip6_create_rt_rcu(f6i);
		if (!rt) {
			rt = net->ipv6.ip6_null_entry;
			dst_hold(&rt->dst);
		}
1111
	}
D
David Ahern 已提交
1112

1113
	rcu_read_unlock();
D
David Ahern 已提交
1114

T
Thomas Graf 已提交
1115 1116 1117
	return rt;
}

1118
struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
D
David Ahern 已提交
1119
				   const struct sk_buff *skb, int flags)
F
Florian Westphal 已提交
1120
{
D
David Ahern 已提交
1121
	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
F
Florian Westphal 已提交
1122 1123 1124
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);

1125
struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
D
David Ahern 已提交
1126 1127
			    const struct in6_addr *saddr, int oif,
			    const struct sk_buff *skb, int strict)
T
Thomas Graf 已提交
1128
{
1129 1130 1131
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.daddr = *daddr,
T
Thomas Graf 已提交
1132 1133
	};
	struct dst_entry *dst;
1134
	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
T
Thomas Graf 已提交
1135

1136
	if (saddr) {
1137
		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
1138 1139 1140
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	}

D
David Ahern 已提交
1141
	dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
T
Thomas Graf 已提交
1142 1143 1144 1145 1146
	if (dst->error == 0)
		return (struct rt6_info *) dst;

	dst_release(dst);

L
Linus Torvalds 已提交
1147 1148
	return NULL;
}
1149 1150
EXPORT_SYMBOL(rt6_lookup);

T
Thomas Graf 已提交
1151
/* ip6_ins_rt is called with FREE table->tb6_lock.
1152 1153 1154
 * It takes new route entry, the addition fails by any reason the
 * route is released.
 * Caller must hold dst before calling it.
L
Linus Torvalds 已提交
1155 1156
 */

1157
static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
1158
			struct netlink_ext_ack *extack)
L
Linus Torvalds 已提交
1159 1160
{
	int err;
T
Thomas Graf 已提交
1161
	struct fib6_table *table;
L
Linus Torvalds 已提交
1162

1163
	table = rt->fib6_table;
1164
	spin_lock_bh(&table->tb6_lock);
1165
	err = fib6_add(&table->tb6_root, rt, info, extack);
1166
	spin_unlock_bh(&table->tb6_lock);
L
Linus Torvalds 已提交
1167 1168 1169 1170

	return err;
}

1171
int ip6_ins_rt(struct net *net, struct fib6_info *rt)
1172
{
1173
	struct nl_info info = {	.nl_net = net, };
1174

1175
	return __ip6_ins_rt(rt, &info, NULL);
1176 1177
}

1178
static struct rt6_info *ip6_rt_cache_alloc(struct fib6_info *ort,
1179 1180
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
L
Linus Torvalds 已提交
1181
{
1182
	struct net_device *dev;
L
Linus Torvalds 已提交
1183 1184 1185 1186 1187 1188
	struct rt6_info *rt;

	/*
	 *	Clone the route.
	 */

1189 1190 1191
	if (!fib6_info_hold_safe(ort))
		return NULL;

1192
	dev = ip6_rt_get_dev_rcu(ort);
1193
	rt = ip6_dst_alloc(dev_net(dev), dev, 0);
1194 1195
	if (!rt) {
		fib6_info_release(ort);
M
Martin KaFai Lau 已提交
1196
		return NULL;
1197
	}
M
Martin KaFai Lau 已提交
1198 1199 1200 1201 1202 1203

	ip6_rt_copy_init(rt, ort);
	rt->rt6i_flags |= RTF_CACHE;
	rt->dst.flags |= DST_HOST;
	rt->rt6i_dst.addr = *daddr;
	rt->rt6i_dst.plen = 128;
L
Linus Torvalds 已提交
1204

M
Martin KaFai Lau 已提交
1205
	if (!rt6_is_gw_or_nonexthop(ort)) {
1206 1207
		if (ort->fib6_dst.plen != 128 &&
		    ipv6_addr_equal(&ort->fib6_dst.addr, daddr))
M
Martin KaFai Lau 已提交
1208
			rt->rt6i_flags |= RTF_ANYCAST;
L
Linus Torvalds 已提交
1209
#ifdef CONFIG_IPV6_SUBTREES
M
Martin KaFai Lau 已提交
1210 1211 1212
		if (rt->rt6i_src.plen && saddr) {
			rt->rt6i_src.addr = *saddr;
			rt->rt6i_src.plen = 128;
1213
		}
M
Martin KaFai Lau 已提交
1214
#endif
1215
	}
L
Linus Torvalds 已提交
1216

1217 1218
	return rt;
}
L
Linus Torvalds 已提交
1219

1220
static struct rt6_info *ip6_rt_pcpu_alloc(struct fib6_info *rt)
M
Martin KaFai Lau 已提交
1221
{
1222
	unsigned short flags = fib6_info_dst_flags(rt);
1223
	struct net_device *dev;
M
Martin KaFai Lau 已提交
1224 1225
	struct rt6_info *pcpu_rt;

1226 1227 1228
	if (!fib6_info_hold_safe(rt))
		return NULL;

1229 1230
	rcu_read_lock();
	dev = ip6_rt_get_dev_rcu(rt);
1231
	pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
1232
	rcu_read_unlock();
1233 1234
	if (!pcpu_rt) {
		fib6_info_release(rt);
M
Martin KaFai Lau 已提交
1235
		return NULL;
1236
	}
M
Martin KaFai Lau 已提交
1237 1238 1239 1240 1241
	ip6_rt_copy_init(pcpu_rt, rt);
	pcpu_rt->rt6i_flags |= RTF_PCPU;
	return pcpu_rt;
}

1242
/* It should be called with rcu_read_lock() acquired */
1243
static struct rt6_info *rt6_get_pcpu_route(struct fib6_info *rt)
M
Martin KaFai Lau 已提交
1244
{
1245
	struct rt6_info *pcpu_rt, **p;
M
Martin KaFai Lau 已提交
1246 1247 1248 1249

	p = this_cpu_ptr(rt->rt6i_pcpu);
	pcpu_rt = *p;

1250 1251
	if (pcpu_rt)
		ip6_hold_safe(NULL, &pcpu_rt, false);
1252

1253 1254 1255
	return pcpu_rt;
}

1256
static struct rt6_info *rt6_make_pcpu_route(struct net *net,
1257
					    struct fib6_info *rt)
1258 1259
{
	struct rt6_info *pcpu_rt, *prev, **p;
M
Martin KaFai Lau 已提交
1260 1261 1262

	pcpu_rt = ip6_rt_pcpu_alloc(rt);
	if (!pcpu_rt) {
1263 1264
		dst_hold(&net->ipv6.ip6_null_entry->dst);
		return net->ipv6.ip6_null_entry;
M
Martin KaFai Lau 已提交
1265 1266
	}

1267 1268 1269
	dst_hold(&pcpu_rt->dst);
	p = this_cpu_ptr(rt->rt6i_pcpu);
	prev = cmpxchg(p, NULL, pcpu_rt);
1270
	BUG_ON(prev);
1271

M
Martin KaFai Lau 已提交
1272 1273 1274
	return pcpu_rt;
}

1275 1276 1277 1278 1279 1280 1281 1282 1283 1284
/* exception hash table implementation
 */
static DEFINE_SPINLOCK(rt6_exception_lock);

/* Remove rt6_ex from hash table and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
				 struct rt6_exception *rt6_ex)
{
1285
	struct net *net;
W
Wei Wang 已提交
1286

1287 1288
	if (!bucket || !rt6_ex)
		return;
1289 1290

	net = dev_net(rt6_ex->rt6i->dst.dev);
1291
	hlist_del_rcu(&rt6_ex->hlist);
1292
	dst_release(&rt6_ex->rt6i->dst);
1293 1294 1295
	kfree_rcu(rt6_ex, rcu);
	WARN_ON_ONCE(!bucket->depth);
	bucket->depth--;
W
Wei Wang 已提交
1296
	net->ipv6.rt6_stats->fib_rt_cache--;
1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399
}

/* Remove oldest rt6_ex in bucket and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
{
	struct rt6_exception *oldest = NULL, *rt6_ex;

	if (!bucket)
		return;

	/* linear scan for the least recently stamped entry */
	hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
		if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
			oldest = rt6_ex;
	}
	rt6_remove_exception(bucket, oldest);
}

/* Hash a (dst, src) address pair into an exception-table bucket index.
 * NOTE(review): jhash keyed with a boot-time random seed is not
 * hash-flooding resistant for remotely-chosen addresses - confirm
 * whether a keyed hash (e.g. siphash) is warranted here.
 */
static u32 rt6_exception_hash(const struct in6_addr *dst,
			      const struct in6_addr *src)
{
	static u32 seed __read_mostly;
	u32 val;

	net_get_random_once(&seed, sizeof(seed));
	val = jhash(dst, sizeof(*dst), seed);

#ifdef CONFIG_IPV6_SUBTREES
	/* fold the source address in only when subtrees are compiled in */
	if (src)
		val = jhash(src, sizeof(*src), val);
#endif
	return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
}

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rt6_exception_lock
 */
static struct rt6_exception *
__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
			      const struct in6_addr *daddr,
			      const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;

	if (!(*bucket) || !daddr)
		return NULL;

	*bucket += rt6_exception_hash(daddr, saddr);

	hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;

		if (!ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr))
			continue;
#ifdef CONFIG_IPV6_SUBTREES
		if (saddr && !ipv6_addr_equal(saddr, &rt6->rt6i_src.addr))
			continue;
#endif
		return rt6_ex;
	}
	return NULL;
}

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rcu_read_lock()
 */
static struct rt6_exception *
__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
			 const struct in6_addr *daddr,
			 const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!(*bucket) || !daddr)
		return NULL;

	*bucket += rt6_exception_hash(daddr, saddr);

	hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;

		if (!ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr))
			continue;
#ifdef CONFIG_IPV6_SUBTREES
		if (saddr && !ipv6_addr_equal(saddr, &rt6->rt6i_src.addr))
			continue;
#endif
		return rt6_ex;
	}
	return NULL;
}

1400
static unsigned int fib6_mtu(const struct fib6_info *rt)
1401 1402 1403
{
	unsigned int mtu;

D
David Ahern 已提交
1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415
	if (rt->fib6_pmtu) {
		mtu = rt->fib6_pmtu;
	} else {
		struct net_device *dev = fib6_info_nh_dev(rt);
		struct inet6_dev *idev;

		rcu_read_lock();
		idev = __in6_dev_get(dev);
		mtu = idev->cnf.mtu6;
		rcu_read_unlock();
	}

1416 1417 1418 1419 1420
	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(rt->fib6_nh.nh_lwtstate, mtu);
}

1421
static int rt6_insert_exception(struct rt6_info *nrt,
1422
				struct fib6_info *ort)
1423
{
1424
	struct net *net = dev_net(nrt->dst.dev);
1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455
	struct rt6_exception_bucket *bucket;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	int err = 0;

	spin_lock_bh(&rt6_exception_lock);

	if (ort->exception_bucket_flushed) {
		err = -EINVAL;
		goto out;
	}

	bucket = rcu_dereference_protected(ort->rt6i_exception_bucket,
					lockdep_is_held(&rt6_exception_lock));
	if (!bucket) {
		bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
				 GFP_ATOMIC);
		if (!bucket) {
			err = -ENOMEM;
			goto out;
		}
		rcu_assign_pointer(ort->rt6i_exception_bucket, bucket);
	}

#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates ort is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
1456
	if (ort->fib6_src.plen)
1457 1458
		src_key = &nrt->rt6i_src.addr;
#endif
1459 1460 1461 1462

	/* Update rt6i_prefsrc as it could be changed
	 * in rt6_remove_prefsrc()
	 */
1463
	nrt->rt6i_prefsrc = ort->fib6_prefsrc;
1464 1465 1466 1467
	/* rt6_mtu_change() might lower mtu on ort.
	 * Only insert this exception route if its mtu
	 * is less than ort's mtu value.
	 */
1468
	if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(ort)) {
1469 1470 1471
		err = -EINVAL;
		goto out;
	}
1472

1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486
	rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex)
		rt6_remove_exception(bucket, rt6_ex);

	rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
	if (!rt6_ex) {
		err = -ENOMEM;
		goto out;
	}
	rt6_ex->rt6i = nrt;
	rt6_ex->stamp = jiffies;
	hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
	bucket->depth++;
W
Wei Wang 已提交
1487
	net->ipv6.rt6_stats->fib_rt_cache++;
1488 1489 1490 1491 1492 1493 1494 1495

	if (bucket->depth > FIB6_MAX_DEPTH)
		rt6_exception_remove_oldest(bucket);

out:
	spin_unlock_bh(&rt6_exception_lock);

	/* Update fn->fn_sernum to invalidate all cached dst */
1496
	if (!err) {
1497
		spin_lock_bh(&ort->fib6_table->tb6_lock);
1498
		fib6_update_sernum(net, ort);
1499
		spin_unlock_bh(&ort->fib6_table->tb6_lock);
1500 1501
		fib6_force_start_gc(net);
	}
1502 1503 1504 1505

	return err;
}

1506
void rt6_flush_exceptions(struct fib6_info *rt)
1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	spin_lock_bh(&rt6_exception_lock);
	/* Prevent rt6_insert_exception() to recreate the bucket list */
	rt->exception_bucket_flushed = 1;

	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
				    lockdep_is_held(&rt6_exception_lock));
	if (!bucket)
		goto out;

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist)
			rt6_remove_exception(bucket, rt6_ex);
		WARN_ON_ONCE(bucket->depth);
		bucket++;
	}

out:
	spin_unlock_bh(&rt6_exception_lock);
}

/* Find cached rt in the hash table inside passed in rt
 * Caller has to hold rcu_read_lock()
 */
1536
static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt,
1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553
					   struct in6_addr *daddr,
					   struct in6_addr *saddr)
{
	struct rt6_exception_bucket *bucket;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	struct rt6_info *res = NULL;

	bucket = rcu_dereference(rt->rt6i_exception_bucket);

#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates rt is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
1554
	if (rt->fib6_src.plen)
1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565
		src_key = saddr;
#endif
	rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);

	if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
		res = rt6_ex->rt6i;

	return res;
}

/* Remove the passed in cached rt from the hash table that contains it */
1566
static int rt6_remove_exception_rt(struct rt6_info *rt)
1567 1568 1569 1570
{
	struct rt6_exception_bucket *bucket;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
1571
	struct fib6_info *from;
1572 1573
	int err;

1574
	from = rcu_dereference(rt->from);
1575
	if (!from ||
1576
	    !(rt->rt6i_flags & RTF_CACHE))
1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591
		return -EINVAL;

	if (!rcu_access_pointer(from->rt6i_exception_bucket))
		return -ENOENT;

	spin_lock_bh(&rt6_exception_lock);
	bucket = rcu_dereference_protected(from->rt6i_exception_bucket,
				    lockdep_is_held(&rt6_exception_lock));
#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
1592
	if (from->fib6_src.plen)
1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_spinlock(&bucket,
					       &rt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex) {
		rt6_remove_exception(bucket, rt6_ex);
		err = 0;
	} else {
		err = -ENOENT;
	}

	spin_unlock_bh(&rt6_exception_lock);
	return err;
}

/* Find rt6_ex which contains the passed in rt cache and
 * refresh its stamp
 */
static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
{
	struct rt6_exception_bucket *bucket;
1615
	struct fib6_info *from = rt->from;
1616 1617 1618 1619
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;

	if (!from ||
1620
	    !(rt->rt6i_flags & RTF_CACHE))
1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632
		return;

	rcu_read_lock();
	bucket = rcu_dereference(from->rt6i_exception_bucket);

#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
1633
	if (from->fib6_src.plen)
1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_rcu(&bucket,
					  &rt->rt6i_dst.addr,
					  src_key);
	if (rt6_ex)
		rt6_ex->stamp = jiffies;

	rcu_read_unlock();
}

1645
static void rt6_exceptions_remove_prefsrc(struct fib6_info *rt)
1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	int i;

	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
					lockdep_is_held(&rt6_exception_lock));

	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
				rt6_ex->rt6i->rt6i_prefsrc.plen = 0;
			}
			bucket++;
		}
	}
}

1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686
static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
					 struct rt6_info *rt, int mtu)
{
	/* If the new MTU is lower than the route PMTU, this new MTU will be the
	 * lowest MTU in the path: always allow updating the route PMTU to
	 * reflect PMTU decreases.
	 *
	 * If the new MTU is higher, and the route PMTU is equal to the local
	 * MTU, this means the old MTU is the lowest in the path, so allow
	 * updating it: if other nodes now have lower MTUs, PMTU discovery will
	 * handle this.
	 */

	if (dst_mtu(&rt->dst) >= mtu)
		return true;

	if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
		return true;

	return false;
}

static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
1687
				       struct fib6_info *rt, int mtu)
1688 1689 1690 1691 1692 1693 1694 1695
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	int i;

	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
					lockdep_is_held(&rt6_exception_lock));

1696 1697 1698 1699 1700 1701 1702 1703
	if (!bucket)
		return;

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
			struct rt6_info *entry = rt6_ex->rt6i;

			/* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
1704
			 * route), the metrics of its rt->from have already
1705 1706
			 * been updated.
			 */
1707
			if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
1708
			    rt6_mtu_change_route_allowed(idev, entry, mtu))
1709
				dst_metric_set(&entry->dst, RTAX_MTU, mtu);
1710
		}
1711
		bucket++;
1712 1713 1714
	}
}

1715 1716
#define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)

1717
static void rt6_exceptions_clean_tohost(struct fib6_info *rt,
1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751
					struct in6_addr *gateway)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	if (!rcu_access_pointer(rt->rt6i_exception_bucket))
		return;

	spin_lock_bh(&rt6_exception_lock);
	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
				     lockdep_is_held(&rt6_exception_lock));

	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				struct rt6_info *entry = rt6_ex->rt6i;

				if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
				    RTF_CACHE_GATEWAY &&
				    ipv6_addr_equal(gateway,
						    &entry->rt6i_gateway)) {
					rt6_remove_exception(bucket, rt6_ex);
				}
			}
			bucket++;
		}
	}

	spin_unlock_bh(&rt6_exception_lock);
}

1752 1753 1754 1755 1756 1757 1758
static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
				      struct rt6_exception *rt6_ex,
				      struct fib6_gc_args *gc_args,
				      unsigned long now)
{
	struct rt6_info *rt = rt6_ex->rt6i;

1759 1760 1761 1762 1763 1764
	/* we are pruning and obsoleting aged-out and non gateway exceptions
	 * even if others have still references to them, so that on next
	 * dst_check() such references can be dropped.
	 * EXPIRES exceptions - e.g. pmtu-generated ones are pruned when
	 * expired, independently from their aging, as per RFC 8201 section 4
	 */
W
Wei Wang 已提交
1765 1766 1767 1768 1769 1770 1771 1772
	if (!(rt->rt6i_flags & RTF_EXPIRES)) {
		if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
			RT6_TRACE("aging clone %p\n", rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	} else if (time_after(jiffies, rt->dst.expires)) {
		RT6_TRACE("purging expired route %p\n", rt);
1773 1774
		rt6_remove_exception(bucket, rt6_ex);
		return;
W
Wei Wang 已提交
1775 1776 1777
	}

	if (rt->rt6i_flags & RTF_GATEWAY) {
1778 1779 1780
		struct neighbour *neigh;
		__u8 neigh_flags = 0;

1781 1782
		neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
		if (neigh)
1783
			neigh_flags = neigh->flags;
1784

1785 1786 1787 1788 1789 1790 1791
		if (!(neigh_flags & NTF_ROUTER)) {
			RT6_TRACE("purging route %p via non-router but gateway\n",
				  rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	}
W
Wei Wang 已提交
1792

1793 1794 1795
	gc_args->more++;
}

1796
void rt6_age_exceptions(struct fib6_info *rt,
1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807
			struct fib6_gc_args *gc_args,
			unsigned long now)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	if (!rcu_access_pointer(rt->rt6i_exception_bucket))
		return;

1808 1809
	rcu_read_lock_bh();
	spin_lock(&rt6_exception_lock);
1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822
	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
				    lockdep_is_held(&rt6_exception_lock));

	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				rt6_age_examine_exception(bucket, rt6_ex,
							  gc_args, now);
			}
			bucket++;
		}
	}
1823 1824
	spin_unlock(&rt6_exception_lock);
	rcu_read_unlock_bh();
1825 1826
}

1827 1828 1829
/* must be called with rcu lock held */
struct fib6_info *fib6_table_lookup(struct net *net, struct fib6_table *table,
				    int oif, struct flowi6 *fl6, int strict)
L
Linus Torvalds 已提交
1830
{
1831
	struct fib6_node *fn, *saved_fn;
1832
	struct fib6_info *f6i;
L
Linus Torvalds 已提交
1833

1834
	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1835
	saved_fn = fn;
L
Linus Torvalds 已提交
1836

D
David Ahern 已提交
1837 1838 1839
	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		oif = 0;

M
Martin KaFai Lau 已提交
1840
redo_rt6_select:
1841 1842
	f6i = rt6_select(net, fn, oif, strict);
	if (f6i == net->ipv6.fib6_null_entry) {
M
Martin KaFai Lau 已提交
1843 1844 1845
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto redo_rt6_select;
1846 1847 1848 1849 1850 1851
		else if (strict & RT6_LOOKUP_F_REACHABLE) {
			/* also consider unreachable route */
			strict &= ~RT6_LOOKUP_F_REACHABLE;
			fn = saved_fn;
			goto redo_rt6_select;
		}
M
Martin KaFai Lau 已提交
1852 1853
	}

1854
	trace_fib6_table_lookup(net, f6i, table, fl6);
1855

1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877
	return f6i;
}

struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
			       int oif, struct flowi6 *fl6,
			       const struct sk_buff *skb, int flags)
{
	struct fib6_info *f6i;
	struct rt6_info *rt;
	int strict = 0;

	strict |= flags & RT6_LOOKUP_F_IFACE;
	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
	if (net->ipv6.devconf_all->forwarding == 0)
		strict |= RT6_LOOKUP_F_REACHABLE;

	rcu_read_lock();

	f6i = fib6_table_lookup(net, table, oif, fl6, strict);
	if (f6i->fib6_nsiblings)
		f6i = fib6_multipath_select(net, f6i, fl6, oif, skb, strict);

1878
	if (f6i == net->ipv6.fib6_null_entry) {
D
David Ahern 已提交
1879
		rt = net->ipv6.ip6_null_entry;
1880
		rcu_read_unlock();
1881 1882
		dst_hold(&rt->dst);
		return rt;
1883 1884 1885 1886 1887
	}

	/*Search through exception table */
	rt = rt6_find_cached_rt(f6i, &fl6->daddr, &fl6->saddr);
	if (rt) {
1888
		if (ip6_hold_safe(net, &rt, true))
1889
			dst_use_noref(&rt->dst, jiffies);
1890

1891
		rcu_read_unlock();
M
Martin KaFai Lau 已提交
1892
		return rt;
1893
	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
1894
			    !(f6i->fib6_flags & RTF_GATEWAY))) {
1895 1896 1897 1898 1899 1900 1901
		/* Create a RTF_CACHE clone which will not be
		 * owned by the fib6 tree.  It is for the special case where
		 * the daddr in the skb during the neighbor look-up is different
		 * from the fl6->daddr used to look-up route here.
		 */
		struct rt6_info *uncached_rt;

1902
		uncached_rt = ip6_rt_cache_alloc(f6i, &fl6->daddr, NULL);
M
Martin KaFai Lau 已提交
1903

1904
		rcu_read_unlock();
T
Thomas Graf 已提交
1905

1906 1907 1908 1909
		if (uncached_rt) {
			/* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc()
			 * No need for another dst_hold()
			 */
1910
			rt6_uncached_list_add(uncached_rt);
W
Wei Wang 已提交
1911
			atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
1912
		} else {
1913
			uncached_rt = net->ipv6.ip6_null_entry;
1914 1915
			dst_hold(&uncached_rt->dst);
		}
D
David Ahern 已提交
1916

1917
		return uncached_rt;
M
Martin KaFai Lau 已提交
1918 1919 1920 1921 1922
	} else {
		/* Get a percpu copy */

		struct rt6_info *pcpu_rt;

1923
		local_bh_disable();
1924
		pcpu_rt = rt6_get_pcpu_route(f6i);
M
Martin KaFai Lau 已提交
1925

1926 1927 1928
		if (!pcpu_rt)
			pcpu_rt = rt6_make_pcpu_route(net, f6i);

1929 1930
		local_bh_enable();
		rcu_read_unlock();
1931

M
Martin KaFai Lau 已提交
1932 1933
		return pcpu_rt;
	}
L
Linus Torvalds 已提交
1934
}
1935
EXPORT_SYMBOL_GPL(ip6_pol_route);
L
Linus Torvalds 已提交
1936

D
David Ahern 已提交
1937 1938 1939 1940 1941
static struct rt6_info *ip6_pol_route_input(struct net *net,
					    struct fib6_table *table,
					    struct flowi6 *fl6,
					    const struct sk_buff *skb,
					    int flags)
1942
{
D
David Ahern 已提交
1943
	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
1944 1945
}

1946 1947
struct dst_entry *ip6_route_input_lookup(struct net *net,
					 struct net_device *dev,
D
David Ahern 已提交
1948 1949 1950
					 struct flowi6 *fl6,
					 const struct sk_buff *skb,
					 int flags)
1951 1952 1953 1954
{
	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
		flags |= RT6_LOOKUP_F_IFACE;

D
David Ahern 已提交
1955
	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
1956
}
1957
EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
1958

1959
static void ip6_multipath_l3_keys(const struct sk_buff *skb,
1960 1961
				  struct flow_keys *keys,
				  struct flow_keys *flkeys)
1962 1963 1964
{
	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
	const struct ipv6hdr *key_iph = outer_iph;
1965
	struct flow_keys *_flkeys = flkeys;
1966 1967 1968
	const struct ipv6hdr *inner_iph;
	const struct icmp6hdr *icmph;
	struct ipv6hdr _inner_iph;
1969
	struct icmp6hdr _icmph;
1970 1971 1972 1973

	if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
		goto out;

1974 1975 1976 1977 1978
	icmph = skb_header_pointer(skb, skb_transport_offset(skb),
				   sizeof(_icmph), &_icmph);
	if (!icmph)
		goto out;

1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991
	if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
	    icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
	    icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
	    icmph->icmp6_type != ICMPV6_PARAMPROB)
		goto out;

	inner_iph = skb_header_pointer(skb,
				       skb_transport_offset(skb) + sizeof(*icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
1992
	_flkeys = NULL;
1993
out:
1994 1995 1996 1997 1998 1999 2000 2001
	if (_flkeys) {
		keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
		keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
		keys->tags.flow_label = _flkeys->tags.flow_label;
		keys->basic.ip_proto = _flkeys->basic.ip_proto;
	} else {
		keys->addrs.v6addrs.src = key_iph->saddr;
		keys->addrs.v6addrs.dst = key_iph->daddr;
2002
		keys->tags.flow_label = ip6_flowlabel(key_iph);
2003 2004
		keys->basic.ip_proto = key_iph->nexthdr;
	}
2005 2006 2007
}

/* if skb is set it will be used and fl6 can be NULL */
2008 2009
u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
		       const struct sk_buff *skb, struct flow_keys *flkeys)
2010 2011
{
	struct flow_keys hash_keys;
2012
	u32 mhash;
2013

2014
	switch (ip6_multipath_hash_policy(net)) {
2015 2016 2017 2018 2019 2020 2021 2022
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (skb) {
			ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
		} else {
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
2023
			hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		break;
	case 1:
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;

			memset(&hash_keys, 0, sizeof(hash_keys));

                        if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, flag);
				flkeys = &keys;
			}
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
			hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
			hash_keys.ports.src = flkeys->ports.src;
			hash_keys.ports.dst = flkeys->ports.dst;
			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.ports.src = fl6->fl6_sport;
			hash_keys.ports.dst = fl6->fl6_dport;
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		break;
2058
	}
2059
	mhash = flow_hash_from_keys(&hash_keys);
2060

2061
	return mhash >> 1;
2062 2063
}

T
Thomas Graf 已提交
2064 2065
/* Input-path route lookup: build a flowi6 from the packet headers,
 * optionally seed a tunnel id and an ICMPv6 multipath hash, then
 * attach the looked-up dst to @skb.
 */
void ip6_route_input(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip_tunnel_info *tun_info;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
	};
	struct flow_keys *flkeys = NULL, _flkeys;

	/* metadata dsts on the RX side carry the tunnel id into the lookup */
	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;

	if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
		flkeys = &_flkeys;

	/* ICMPv6 errors should follow the flow they refer to, so derive
	 * the multipath hash from the embedded flow keys.
	 */
	if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
		fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);

	skb_dst_drop(skb);
	skb_dst_set(skb,
		    ip6_route_input_lookup(net, skb->dev, &fl6, skb, flags));
}

D
David Ahern 已提交
2094 2095 2096 2097 2098
/* Per-table lookup callback for the output path; invoked through
 * fib6_rule_lookup() from ip6_route_output_flags().
 */
static struct rt6_info *ip6_pol_route_output(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
}

2103 2104
/* Output-path route lookup.
 *
 * @net:   namespace to look up in
 * @sk:    originating socket, or NULL; influences device binding and
 *         source-address preference flags
 * @fl6:   flow to route; flowi6_iif is forced to loopback here
 * @flags: additional RT6_LOOKUP_F_* flags from the caller
 *
 * Returns a dst_entry (never NULL; errors are encoded in the dst).
 */
struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
					 struct flowi6 *fl6, int flags)
{
	bool any_src;

	/* Scoped (link-local/multicast) destinations may be owned by an
	 * L3 master device; give it first shot at the lookup.
	 */
	if (rt6_need_strict(&fl6->daddr)) {
		struct dst_entry *dst;

		dst = l3mdev_link_scope_lookup(net, fl6);
		if (dst)
			return dst;
	}

	fl6->flowi6_iif = LOOPBACK_IFINDEX;

	any_src = ipv6_addr_any(&fl6->saddr);
	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
	    (fl6->flowi6_oif && any_src))
		flags |= RT6_LOOKUP_F_IFACE;

	if (!any_src)
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	else if (sk)
		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

	return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags);
L
Linus Torvalds 已提交
2131

2132
/* Clone @dst_orig into a blackhole route: a dst bound to the loopback
 * device whose input/output handlers discard every packet, while
 * keeping the original's metrics, gateway and destination keys.
 * Always releases @dst_orig; returns the new dst or ERR_PTR(-ENOMEM).
 */
struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
	struct net_device *loopback_dev = net->loopback_dev;
	struct dst_entry *new = NULL;

	rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
		       DST_OBSOLETE_DEAD, 0);
	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);

		new = &rt->dst;
		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard_out;

		dst_copy_metrics(new, &ort->dst);

		rt->rt6i_idev = in6_dev_get(loopback_dev);
		rt->rt6i_gateway = ort->rt6i_gateway;
		/* the clone is not a per-cpu copy of anything */
		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;

		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
	}

	dst_release(dst_orig);
	return new ? new : ERR_PTR(-ENOMEM);
}

L
Linus Torvalds 已提交
2165 2166 2167 2168
/*
 *	Destination cache support functions
 */

2169
/* Validate a fib6_info against a dst cookie: the entry is good only
 * when its cookie can be read safely, matches @cookie, and the route
 * has not expired.
 */
static bool fib6_check(struct fib6_info *f6i, u32 cookie)
{
	u32 fib_cookie = 0;

	return fib6_get_cookie_safe(f6i, &fib_cookie) &&
	       fib_cookie == cookie &&
	       !fib6_check_expired(f6i);
}

2182 2183 2184
/* Validate a cached rt6_info against its parent fib6_info @from.
 * Stale when the fib cookie cannot be read or does not match @cookie,
 * or when the rt itself has expired.  Returns the dst when still
 * valid, NULL when stale.
 */
static struct dst_entry *rt6_check(struct rt6_info *rt,
				   struct fib6_info *from,
				   u32 cookie)
{
	u32 rt_cookie = 0;

	if ((from && !fib6_get_cookie_safe(from, &rt_cookie)) ||
	    rt_cookie != cookie)
		return NULL;

	if (rt6_check_expired(rt))
		return NULL;

	return &rt->dst;
}

2198 2199 2200
/* Validate a pcpu/uncached rt against the fib6_info it was derived
 * from.  Returns the dst when still usable, NULL otherwise.
 */
static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
					    struct fib6_info *from,
					    u32 cookie)
{
	if (__rt6_check_expired(rt))
		return NULL;
	if (rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK)
		return NULL;
	if (!fib6_check(from, cookie))
		return NULL;
	return &rt->dst;
}

L
Linus Torvalds 已提交
2210 2211
/* dst_ops->check handler: revalidate a cached dst against @cookie.
 * Returns the dst if still valid, NULL if the caller must re-lookup.
 */
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct dst_entry *dst_ret;
	struct fib6_info *from;
	struct rt6_info *rt;

	rt = container_of(dst, struct rt6_info, dst);

	rcu_read_lock();

	/* All IPV6 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 */

	from = rcu_dereference(rt->from);

	/* pcpu clones and uncached entries are validated against the
	 * fib6_info they came from; everything else via rt6_check().
	 */
	if (from && (rt->rt6i_flags & RTF_PCPU ||
	    unlikely(!list_empty(&rt->rt6i_uncached))))
		dst_ret = rt6_dst_from_check(rt, from, cookie);
	else
		dst_ret = rt6_check(rt, from, cookie);

	rcu_read_unlock();

	return dst_ret;
}

/* dst_ops->negative_advice handler: the caller reports the dst is not
 * working.  Expired RTF_CACHE entries are removed from the exception
 * table; non-cache dsts are simply released.  Returns the dst if it
 * may still be used, NULL if the caller should drop its reference.
 */
static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *) dst;

	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			rcu_read_lock();
			if (rt6_check_expired(rt)) {
				rt6_remove_exception_rt(rt);
				dst = NULL;
			}
			rcu_read_unlock();
		} else {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

/* dst_ops->link_failure handler: report unreachability to the sender
 * and invalidate the route the packet used — cached exceptions are
 * removed, default routes get their fib node serial bumped so cached
 * lookups are forced to revalidate.
 */
static void ip6_link_failure(struct sk_buff *skb)
{
	struct rt6_info *rt;

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);

	rt = (struct rt6_info *) skb_dst(skb);
	if (rt) {
		rcu_read_lock();
		if (rt->rt6i_flags & RTF_CACHE) {
			rt6_remove_exception_rt(rt);
		} else {
			struct fib6_info *from;
			struct fib6_node *fn;

			/* invalidate the sernum so users of this default
			 * route redo the fib lookup
			 */
			from = rcu_dereference(rt->from);
			if (from) {
				fn = rcu_dereference(from->fib6_node);
				if (fn && (rt->rt6i_flags & RTF_DEFAULT))
					fn->fn_sernum = -1;
			}
		}
		rcu_read_unlock();
	}
}

2284 2285
/* Arm (or re-arm) the expiry timer on @rt0 for @timeout jiffies.
 * If the rt had no expiry yet, first inherit the parent fib6_info's
 * expires value before applying the new timeout.
 */
static void rt6_update_expires(struct rt6_info *rt0, int timeout)
{
	if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
		struct fib6_info *from;

		rcu_read_lock();
		from = rcu_dereference(rt0->from);
		if (from)
			rt0->dst.expires = from->expires;
		rcu_read_unlock();
	}

	dst_set_expires(&rt0->dst, timeout);
	rt0->rt6i_flags |= RTF_EXPIRES;
}

2300 2301 2302 2303
/* Record a learned path MTU on @rt, mark it modified, and arm its
 * expiry with the namespace's ip6_rt_mtu_expires timeout.
 */
static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
{
	struct net *net = dev_net(rt->dst.dev);

	dst_metric_set(&rt->dst, RTAX_MTU, mtu);
	rt->rt6i_flags |= RTF_MODIFIED;
	rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
}

2309 2310
/* True when a PMTU update on @rt should be stored in a new cached
 * exception route rather than applied to @rt directly: @rt must not
 * itself be a cache entry, and must either be a per-cpu clone or have
 * a parent fib6_info to hang the exception off.
 */
static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
{
	bool from_set;

	rcu_read_lock();
	from_set = !!rcu_dereference(rt->from);
	rcu_read_unlock();

	return !(rt->rt6i_flags & RTF_CACHE) &&
		(rt->rt6i_flags & RTF_PCPU || from_set);
}

2321 2322
/* Core PMTU update.  Addresses come from @iph if given, else from
 * @sk, else the update is address-less (no exception can be created).
 * The new @mtu is clamped up to IPV6_MIN_MTU and ignored unless it is
 * smaller than the current dst MTU.  Depending on the route type the
 * MTU is either written to the route itself or stored in a newly
 * allocated cached exception.
 */
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
				 const struct ipv6hdr *iph, u32 mtu)
{
	const struct in6_addr *daddr, *saddr;
	struct rt6_info *rt6 = (struct rt6_info *)dst;

	/* administratively locked MTU can never be updated */
	if (dst_metric_locked(dst, RTAX_MTU))
		return;

	if (iph) {
		daddr = &iph->daddr;
		saddr = &iph->saddr;
	} else if (sk) {
		daddr = &sk->sk_v6_daddr;
		saddr = &inet6_sk(sk)->saddr;
	} else {
		daddr = NULL;
		saddr = NULL;
	}
	dst_confirm_neigh(dst, daddr);
	mtu = max_t(u32, mtu, IPV6_MIN_MTU);
	if (mtu >= dst_mtu(dst))
		return;

	if (!rt6_cache_allowed_for_pmtu(rt6)) {
		rt6_do_update_pmtu(rt6, mtu);
		/* update rt6_ex->stamp for cache */
		if (rt6->rt6i_flags & RTF_CACHE)
			rt6_update_exception_stamp_rt(rt6);
	} else if (daddr) {
		/* store the PMTU in a per-destination cached exception */
		struct fib6_info *from;
		struct rt6_info *nrt6;

		rcu_read_lock();
		from = rcu_dereference(rt6->from);
		nrt6 = ip6_rt_cache_alloc(from, daddr, saddr);
		if (nrt6) {
			rt6_do_update_pmtu(nrt6, mtu);
			/* insertion failure means a duplicate exists;
			 * drop our copy immediately
			 */
			if (rt6_insert_exception(nrt6, from))
				dst_release_immediate(&nrt6->dst);
		}
		rcu_read_unlock();
	}
}

2366 2367 2368 2369 2370 2371
/* dst_ops->update_pmtu handler: thin wrapper passing the packet's
 * IPv6 header (when a packet is available) to __ip6_rt_update_pmtu().
 */
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu)
{
	__ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
}

2372
/* Apply a PMTU update for the flow described by the IPv6 header at
 * skb->data (e.g. from a received Packet Too Big): look up the
 * matching output route and update its MTU.  @mtu is network order.
 */
void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
		     int oif, u32 mark, kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_oif = oif;
	/* fall back to the reply mark when the caller supplied none */
	fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_uid = uid;

	dst = ip6_route_output(net, NULL, &fl6);
	if (!dst->error)
		__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_update_pmtu);

/* Socket-aware PMTU update: apply the update for @sk's flow, then, if
 * the socket's cached dst fails revalidation, refresh it (datagram
 * sockets only, and only when the socket is not owned by user context).
 */
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
	int oif = sk->sk_bound_dev_if;
	struct dst_entry *dst;

	if (!oif && skb->dev)
		oif = l3mdev_master_ifindex(skb->dev);

	ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);

	dst = __sk_dst_get(sk);
	if (!dst || !dst->obsolete ||
	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
		return;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		ip6_datagram_dst_update(sk, false);
	bh_unlock_sock(sk);
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);

2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432
/* Store @dst on @sk via ip6_dst_store(); the socket's daddr (and,
 * with subtrees, saddr) pointer is passed through only when it equals
 * the flow's corresponding address, NULL otherwise.
 */
void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
			   const struct flowi6 *fl6)
{
#ifdef CONFIG_IPV6_SUBTREES
	struct ipv6_pinfo *np = inet6_sk(sk);
#endif

	ip6_dst_store(sk, dst,
		      ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
		      &sk->sk_v6_daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
		      ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
		      &np->saddr :
#endif
		      NULL);
}

2433 2434 2435 2436 2437 2438 2439 2440 2441
/* Handle redirects */
struct ip6rd_flowi {
	struct flowi6 fl6;
	/* address the redirect was validated against (the redirect
	 * packet's source address, see ip6_redirect())
	 */
	struct in6_addr gateway;
};

/* Per-table lookup for redirect processing: find the route that the
 * redirect from rdfl->gateway legitimately applies to.  Walks the fib
 * node for the destination, skipping dead/expired/non-gateway entries
 * and entries on a different interface, and also consults the cached
 * exception table since a previous redirect may have changed the
 * gateway there.  Returns a held rt6_info (possibly the null entry).
 */
static struct rt6_info *__ip6_route_redirect(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
	struct rt6_info *ret = NULL, *rt_cache;
	struct fib6_info *rt;
	struct fib6_node *fn;

	/* Get the "current" route for this destination and
	 * check if the redirect has come from appropriate router.
	 *
	 * RFC 4861 specifies that redirects should only be
	 * accepted if they come from the nexthop to the target.
	 * Due to the way the routes are chosen, this notion
	 * is a bit fuzzy and one might need to check all possible
	 * routes.
	 */

	rcu_read_lock();
	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	for_each_fib6_node_rt_rcu(fn) {
		if (rt->fib6_nh.nh_flags & RTNH_F_DEAD)
			continue;
		if (fib6_check_expired(rt))
			continue;
		if (rt->fib6_flags & RTF_REJECT)
			break;
		if (!(rt->fib6_flags & RTF_GATEWAY))
			continue;
		if (fl6->flowi6_oif != rt->fib6_nh.nh_dev->ifindex)
			continue;
		/* rt_cache's gateway might be different from its 'parent'
		 * in the case of an ip redirect.
		 * So we keep searching in the exception table if the gateway
		 * is different.
		 */
		if (!ipv6_addr_equal(&rdfl->gateway, &rt->fib6_nh.nh_gw)) {
			rt_cache = rt6_find_cached_rt(rt,
						      &fl6->daddr,
						      &fl6->saddr);
			if (rt_cache &&
			    ipv6_addr_equal(&rdfl->gateway,
					    &rt_cache->rt6i_gateway)) {
				ret = rt_cache;
				break;
			}
			continue;
		}
		break;
	}

	if (!rt)
		rt = net->ipv6.fib6_null_entry;
	else if (rt->fib6_flags & RTF_REJECT) {
		ret = net->ipv6.ip6_null_entry;
		goto out;
	}

	/* nothing matched at this node: back off toward less-specific
	 * prefixes and retry
	 */
	if (rt == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}

out:
	if (ret)
		ip6_hold_safe(net, &ret, true);
	else
		ret = ip6_create_rt_rcu(rt);

	rcu_read_unlock();

	trace_fib6_table_lookup(net, rt, table, fl6);
	return ret;
};

/* Wrap @fl6 and @gateway into an ip6rd_flowi and resolve the route a
 * redirect applies to, via __ip6_route_redirect() through the
 * policy-rule machinery.
 */
static struct dst_entry *ip6_route_redirect(struct net *net,
					    const struct flowi6 *fl6,
					    const struct sk_buff *skb,
					    const struct in6_addr *gateway)
{
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip6rd_flowi rdfl;

	rdfl.fl6 = *fl6;
	rdfl.gateway = *gateway;

	return fib6_rule_lookup(net, &rdfl.fl6, skb,
				flags, __ip6_route_redirect);
}

2534 2535
/* Process an ICMPv6 redirect carried in @skb: build the flow from the
 * embedded IPv6 header at skb->data, find the affected route and hand
 * it to rt6_do_redirect().  The redirecting router is identified by
 * the outer packet's source address.
 */
void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
		  kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark;
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_uid = uid;

	dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_redirect);

2556 2557 2558 2559 2560 2561 2562 2563 2564
/* Variant of ip6_redirect() for redirect messages that do not carry
 * the offending packet header: the flow is reconstructed from the
 * redirect (rd_msg) target and the outer IPv6 header instead.
 */
void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
			    u32 mark)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark;
	fl6.daddr = msg->dest;
	fl6.saddr = iph->daddr;
	fl6.flowi6_uid = sock_net_uid(net, NULL);

	dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}

2577 2578
/* Socket convenience wrapper: process a redirect using the socket's
 * bound device, mark and uid.
 */
void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
		     sk->sk_uid);
}
EXPORT_SYMBOL_GPL(ip6_sk_redirect);

2584
/* dst_ops->default_advmss handler: advertised MSS is the path MTU
 * minus IPv6 and TCP headers, clamped below by ip6_rt_min_advmss and
 * above by IPV6_MAXPLEN.
 */
static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;
	unsigned int mtu = dst_mtu(dst);
	struct net *net = dev_net(dev);

	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);

	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;

	/*
	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
	 * IPV6_MAXPLEN is also valid and means: "any MSS,
	 * rely only on pmtu discovery"
	 */
	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
		mtu = IPV6_MAXPLEN;
	return mtu;
}

2606
/* dst_ops->mtu handler: the route's RTAX_MTU metric if set, otherwise
 * the egress device's mtu6 (or IPV6_MIN_MTU without an inet6_dev),
 * capped at IP6_MAX_MTU and reduced by any lwtunnel encap headroom.
 */
static unsigned int ip6_mtu(const struct dst_entry *dst)
{
	struct inet6_dev *idev;
	unsigned int mtu;

	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (mtu)
		goto out;

	mtu = IPV6_MIN_MTU;

	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

out:
	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676
/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 *
 * based on ip6_dst_mtu_forward and exception logic of
 * rt6_find_cached_rt; called with rcu_read_lock
 *
 * @f6i:   route whose MTU is wanted
 * @daddr: destination used to look up a cached exception
 * @saddr: source used for the exception lookup (subtrees only)
 */
u32 ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
		      struct in6_addr *saddr)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct in6_addr *src_key;
	struct inet6_dev *idev;
	u32 mtu = 0;

	if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
		mtu = f6i->fib6_pmtu;
		if (mtu)
			goto out;
	}

	src_key = NULL;
#ifdef CONFIG_IPV6_SUBTREES
	if (f6i->fib6_src.plen)
		src_key = saddr;
#endif

	bucket = rcu_dereference(f6i->rt6i_exception_bucket);
	rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
	if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
		mtu = dst_metric_raw(&rt6_ex->rt6i->dst, RTAX_MTU);

	if (likely(!mtu)) {
		/* no route/exception MTU: fall back to the nexthop device */
		struct net_device *dev = fib6_info_nh_dev(f6i);

		mtu = IPV6_MIN_MTU;
		idev = __in6_dev_get(dev);
		if (idev && idev->cnf.mtu6 > mtu)
			mtu = idev->cnf.mtu6;
	}

	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
out:
	return mtu - lwtunnel_headroom(fib6_info_nh_lwt(f6i), mtu);
}

2677
/* Allocate an ephemeral host route for an ICMPv6 packet; the route is
 * never inserted into the FIB.  It is placed on the uncached list so
 * device teardown can release it, and the result is passed through
 * xfrm_lookup().  Returns a dst or an ERR_PTR.
 */
struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
				  struct flowi6 *fl6)
{
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct inet6_dev *idev = in6_dev_get(dev);
	struct net *net = dev_net(dev);

	if (unlikely(!idev))
		return ERR_PTR(-ENODEV);

	rt = ip6_dst_alloc(net, dev, 0);
	if (unlikely(!rt)) {
		in6_dev_put(idev);
		dst = ERR_PTR(-ENOMEM);
		goto out;
	}

	rt->dst.flags |= DST_HOST;
	rt->dst.input = ip6_input;
	rt->dst.output  = ip6_output;
	rt->rt6i_gateway  = fl6->daddr;
	rt->rt6i_dst.addr = fl6->daddr;
	rt->rt6i_dst.plen = 128;
	rt->rt6i_idev     = idev;
	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);

	/* Add this dst into uncached_list so that rt6_disable_ip() can
	 * do proper release of the net_device
	 */
	rt6_uncached_list_add(rt);
	atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);

	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);

out:
	return dst;
}

2716
/* dst_ops->gc handler: run fib6 garbage collection when the entry
 * count exceeds ip6_rt_max_size or the minimum GC interval has
 * elapsed.  ip6_rt_gc_expire ages exponentially (shifted down by the
 * elasticity sysctl each call) and is reset once the table shrinks
 * below gc_thresh.  Returns nonzero while the table is still over
 * the size limit.
 */
static int ip6_dst_gc(struct dst_ops *ops)
{
	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
	int entries;

	entries = dst_entries_get_fast(ops);
	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
	    entries <= rt_max_size)
		goto out;

	net->ipv6.ip6_rt_gc_expire++;
	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
	entries = dst_entries_get_slow(ops);
	if (entries < ops->gc_thresh)
		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
out:
	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
	return entries > rt_max_size;
}

2741
/* Convert the netlink RTAX metrics in @cfg into a freshly allocated
 * dst_metrics block attached to @rt.  Returns 0 when no metrics were
 * supplied, -ENOMEM on allocation failure, else the result of
 * ip_metrics_convert().
 */
static int ip6_convert_metrics(struct net *net, struct fib6_info *rt,
			       struct fib6_config *cfg)
{
	struct dst_metrics *p;

	if (!cfg->fc_mx)
		return 0;

	p = kzalloc(sizeof(*rt->fib6_metrics), GFP_KERNEL);
	if (unlikely(!p))
		return -ENOMEM;

	refcount_set(&p->refcnt, 1);
	rt->fib6_metrics = p;

	return ip_metrics_convert(net, cfg->fc_mx, cfg->fc_mx_len, p->metrics);
}
L
Linus Torvalds 已提交
2758

2759 2760
/* Resolve a nexthop gateway @gw_addr within a single table @tbid,
 * ignoring link state.  Returns the matching rt6_info (held), or NULL
 * when the table is missing or only the null entry matched.
 */
static struct rt6_info *ip6_nh_lookup_table(struct net *net,
					    struct fib6_config *cfg,
					    const struct in6_addr *gw_addr,
					    u32 tbid, int flags)
{
	struct flowi6 fl6 = {
		.flowi6_oif = cfg->fc_ifindex,
		.daddr = *gw_addr,
		.saddr = cfg->fc_prefsrc,
	};
	struct fib6_table *table;
	struct rt6_info *rt;

	table = fib6_get_table(net, tbid);
	if (!table)
		return NULL;

	if (!ipv6_addr_any(&cfg->fc_prefsrc))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
	rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, NULL, flags);

	/* if table lookup failed, fall back to full lookup */
	if (rt == net->ipv6.ip6_null_entry) {
		ip6_rt_put(rt);
		rt = NULL;
	}

	return rt;
}

2791 2792
/* Validate an RTNH_F_ONLINK nexthop: the gateway must not resolve to
 * a local/anycast/reject route or to a different device in the
 * device's FIB table.  Returns 0 if acceptable, -EINVAL (with extack
 * message) otherwise.
 */
static int ip6_route_check_nh_onlink(struct net *net,
				     struct fib6_config *cfg,
				     const struct net_device *dev,
				     struct netlink_ext_ack *extack)
{
	u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
	struct rt6_info *grt;
	int err;

	err = 0;
	grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
	if (grt) {
		/* NOTE(review): grt->from is an RCU-managed pointer
		 * elsewhere in this file (rcu_dereference); it is read
		 * here without an explicit rcu_read_lock — confirm the
		 * caller's context makes this safe.
		 */
		if (!grt->dst.error &&
		    /* ignore match if it is the default route */
		    grt->from && !ipv6_addr_any(&grt->from->fib6_dst.addr) &&
		    (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
			NL_SET_ERR_MSG(extack,
				       "Nexthop has invalid gateway or device mismatch");
			err = -EINVAL;
		}

		ip6_rt_put(grt);
	}

	return err;
}

2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830
/* Resolve the egress device/idev for a gateway nexthop: look the
 * gateway up (first in the configured table, then globally) and
 * require that it is directly reachable (not via another gateway) on
 * the expected device.  On success, *_dev/*idev are filled in (with
 * references held) when the caller did not supply a device.  Returns
 * 0 on success, -EHOSTUNREACH otherwise.
 */
static int ip6_route_check_nh(struct net *net,
			      struct fib6_config *cfg,
			      struct net_device **_dev,
			      struct inet6_dev **idev)
{
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	struct net_device *dev = _dev ? *_dev : NULL;
	struct rt6_info *grt = NULL;
	int err = -EHOSTUNREACH;

	if (cfg->fc_table) {
		int flags = RT6_LOOKUP_F_IFACE;

		grt = ip6_nh_lookup_table(net, cfg, gw_addr,
					  cfg->fc_table, flags);
		if (grt) {
			/* a recursive gateway or a device mismatch
			 * disqualifies the table result
			 */
			if (grt->rt6i_flags & RTF_GATEWAY ||
			    (dev && dev != grt->dst.dev)) {
				ip6_rt_put(grt);
				grt = NULL;
			}
		}
	}

	if (!grt)
		grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, NULL, 1);

	if (!grt)
		goto out;

	if (dev) {
		if (dev != grt->dst.dev) {
			ip6_rt_put(grt);
			goto out;
		}
	} else {
		/* adopt the device/idev of the resolved route */
		*_dev = dev = grt->dst.dev;
		*idev = grt->rt6i_idev;
		dev_hold(dev);
		in6_dev_hold(grt->rt6i_idev);
	}

	if (!(grt->rt6i_flags & RTF_GATEWAY))
		err = 0;

	ip6_rt_put(grt);

out:
	return err;
}

2871 2872 2873 2874 2875 2876
/* Full validation of a gateway nexthop for route insertion: the
 * gateway must not be a local address, must be link-local unicast (or
 * an allowed unicast/v4-mapped exception resolved via the onlink or
 * recursive nexthop checks), and the resulting egress device must
 * exist and not be loopback.  May update *_dev/*idev via
 * ip6_route_check_nh().  Returns 0 on success, negative errno with an
 * extack message otherwise.
 */
static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
			   struct net_device **_dev, struct inet6_dev **idev,
			   struct netlink_ext_ack *extack)
{
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	int gwa_type = ipv6_addr_type(gw_addr);
	bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
	const struct net_device *dev = *_dev;
	bool need_addr_check = !dev;
	int err = -EINVAL;

	/* if gw_addr is local we will fail to detect this in case
	 * address is still TENTATIVE (DAD in progress). rt6_lookup()
	 * will return already-added prefix route via interface that
	 * prefix route was assigned to, which might be non-loopback.
	 */
	if (dev &&
	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
		goto out;
	}

	if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
		/* IPv6 strictly inhibits using not link-local
		 * addresses as nexthop address.
		 * Otherwise, router will not able to send redirects.
		 * It is very good, but in some (rare!) circumstances
		 * (SIT, PtP, NBMA NOARP links) it is handy to allow
		 * some exceptions. --ANK
		 * We allow IPv4-mapped nexthops to support RFC4798-type
		 * addressing
		 */
		if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
			NL_SET_ERR_MSG(extack, "Invalid gateway address");
			goto out;
		}

		if (cfg->fc_flags & RTNH_F_ONLINK)
			err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
		else
			err = ip6_route_check_nh(net, cfg, _dev, idev);

		if (err)
			goto out;
	}

	/* reload in case device was changed */
	dev = *_dev;

	err = -EINVAL;
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Egress device not specified");
		goto out;
	} else if (dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack,
			       "Egress device can not be loopback device for this route");
		goto out;
	}

	/* if we did not check gw_addr above, do so now that the
	 * egress device has been resolved.
	 */
	if (need_addr_check &&
	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
		goto out;
	}

	err = 0;
out:
	return err;
}

2944
static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
2945
					      gfp_t gfp_flags,
2946
					      struct netlink_ext_ack *extack)
L
Linus Torvalds 已提交
2947
{
2948
	struct net *net = cfg->fc_nlinfo.nl_net;
2949
	struct fib6_info *rt = NULL;
L
Linus Torvalds 已提交
2950 2951
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;
T
Thomas Graf 已提交
2952
	struct fib6_table *table;
L
Linus Torvalds 已提交
2953
	int addr_type;
2954
	int err = -EINVAL;
L
Linus Torvalds 已提交
2955

2956
	/* RTF_PCPU is an internal flag; can not be set by userspace */
2957 2958
	if (cfg->fc_flags & RTF_PCPU) {
		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
2959
		goto out;
2960
	}
2961

2962 2963 2964 2965 2966 2967
	/* RTF_CACHE is an internal flag; can not be set by userspace */
	if (cfg->fc_flags & RTF_CACHE) {
		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
		goto out;
	}

2968 2969 2970 2971 2972
	if (cfg->fc_type > RTN_MAX) {
		NL_SET_ERR_MSG(extack, "Invalid route type");
		goto out;
	}

2973 2974 2975 2976 2977 2978
	if (cfg->fc_dst_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid prefix length");
		goto out;
	}
	if (cfg->fc_src_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid source address length");
2979
		goto out;
2980
	}
L
Linus Torvalds 已提交
2981
#ifndef CONFIG_IPV6_SUBTREES
2982 2983 2984
	if (cfg->fc_src_len) {
		NL_SET_ERR_MSG(extack,
			       "Specifying source address requires IPV6_SUBTREES to be enabled");
2985
		goto out;
2986
	}
L
Linus Torvalds 已提交
2987
#endif
2988
	if (cfg->fc_ifindex) {
L
Linus Torvalds 已提交
2989
		err = -ENODEV;
2990
		dev = dev_get_by_index(net, cfg->fc_ifindex);
L
Linus Torvalds 已提交
2991 2992 2993 2994 2995 2996 2997
		if (!dev)
			goto out;
		idev = in6_dev_get(dev);
		if (!idev)
			goto out;
	}

2998 2999
	if (cfg->fc_metric == 0)
		cfg->fc_metric = IP6_RT_PRIO_USER;
L
Linus Torvalds 已提交
3000

3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015
	if (cfg->fc_flags & RTNH_F_ONLINK) {
		if (!dev) {
			NL_SET_ERR_MSG(extack,
				       "Nexthop device required for onlink");
			err = -ENODEV;
			goto out;
		}

		if (!(dev->flags & IFF_UP)) {
			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
			err = -ENETDOWN;
			goto out;
		}
	}

3016
	err = -ENOBUFS;
3017 3018
	if (cfg->fc_nlinfo.nlh &&
	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
3019
		table = fib6_get_table(net, cfg->fc_table);
3020
		if (!table) {
3021
			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
3022 3023 3024 3025 3026
			table = fib6_new_table(net, cfg->fc_table);
		}
	} else {
		table = fib6_new_table(net, cfg->fc_table);
	}
3027 3028

	if (!table)
T
Thomas Graf 已提交
3029 3030
		goto out;

3031 3032 3033
	err = -ENOMEM;
	rt = fib6_info_alloc(gfp_flags);
	if (!rt)
L
Linus Torvalds 已提交
3034
		goto out;
3035 3036 3037

	if (cfg->fc_flags & RTF_ADDRCONF)
		rt->dst_nocount = true;
L
Linus Torvalds 已提交
3038

3039 3040
	err = ip6_convert_metrics(net, rt, cfg);
	if (err < 0)
L
Linus Torvalds 已提交
3041 3042
		goto out;

3043
	if (cfg->fc_flags & RTF_EXPIRES)
3044
		fib6_set_expires(rt, jiffies +
3045 3046
				clock_t_to_jiffies(cfg->fc_expires));
	else
3047
		fib6_clean_expires(rt);
L
Linus Torvalds 已提交
3048

3049 3050
	if (cfg->fc_protocol == RTPROT_UNSPEC)
		cfg->fc_protocol = RTPROT_BOOT;
3051
	rt->fib6_protocol = cfg->fc_protocol;
3052 3053

	addr_type = ipv6_addr_type(&cfg->fc_dst);
L
Linus Torvalds 已提交
3054

3055 3056 3057
	if (cfg->fc_encap) {
		struct lwtunnel_state *lwtstate;

3058
		err = lwtunnel_build_state(cfg->fc_encap_type,
3059
					   cfg->fc_encap, AF_INET6, cfg,
3060
					   &lwtstate, extack);
3061 3062
		if (err)
			goto out;
3063
		rt->fib6_nh.nh_lwtstate = lwtstate_get(lwtstate);
3064 3065
	}

3066 3067 3068
	ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
	rt->fib6_dst.plen = cfg->fc_dst_len;
	if (rt->fib6_dst.plen == 128)
3069
		rt->dst_host = true;
3070

L
Linus Torvalds 已提交
3071
#ifdef CONFIG_IPV6_SUBTREES
3072 3073
	ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
	rt->fib6_src.plen = cfg->fc_src_len;
L
Linus Torvalds 已提交
3074 3075
#endif

3076
	rt->fib6_metric = cfg->fc_metric;
3077
	rt->fib6_nh.nh_weight = 1;
L
Linus Torvalds 已提交
3078

3079
	rt->fib6_type = cfg->fc_type;
L
Linus Torvalds 已提交
3080 3081 3082 3083

	/* We cannot add true routes via loopback here,
	   they would result in kernel looping; promote them to reject routes
	 */
3084
	if ((cfg->fc_flags & RTF_REJECT) ||
3085 3086 3087
	    (dev && (dev->flags & IFF_LOOPBACK) &&
	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
	     !(cfg->fc_flags & RTF_LOCAL))) {
L
Linus Torvalds 已提交
3088
		/* hold loopback dev/idev if we haven't done so. */
3089
		if (dev != net->loopback_dev) {
L
Linus Torvalds 已提交
3090 3091 3092 3093
			if (dev) {
				dev_put(dev);
				in6_dev_put(idev);
			}
3094
			dev = net->loopback_dev;
L
Linus Torvalds 已提交
3095 3096 3097 3098 3099 3100 3101
			dev_hold(dev);
			idev = in6_dev_get(dev);
			if (!idev) {
				err = -ENODEV;
				goto out;
			}
		}
3102
		rt->fib6_flags = RTF_REJECT|RTF_NONEXTHOP;
L
Linus Torvalds 已提交
3103 3104 3105
		goto install_route;
	}

3106
	if (cfg->fc_flags & RTF_GATEWAY) {
3107 3108
		err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
		if (err)
3109
			goto out;
L
Linus Torvalds 已提交
3110

3111
		rt->fib6_nh.nh_gw = cfg->fc_gateway;
L
Linus Torvalds 已提交
3112 3113 3114
	}

	err = -ENODEV;
3115
	if (!dev)
L
Linus Torvalds 已提交
3116 3117
		goto out;

3118 3119 3120 3121 3122 3123
	if (idev->cnf.disable_ipv6) {
		NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
		err = -EACCES;
		goto out;
	}

3124 3125 3126 3127 3128 3129
	if (!(dev->flags & IFF_UP)) {
		NL_SET_ERR_MSG(extack, "Nexthop device is not up");
		err = -ENETDOWN;
		goto out;
	}

3130 3131
	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
3132
			NL_SET_ERR_MSG(extack, "Invalid source address");
3133 3134 3135
			err = -EINVAL;
			goto out;
		}
3136 3137
		rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
		rt->fib6_prefsrc.plen = 128;
3138
	} else
3139
		rt->fib6_prefsrc.plen = 0;
3140

3141
	rt->fib6_flags = cfg->fc_flags;
L
Linus Torvalds 已提交
3142 3143

install_route:
3144
	if (!(rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
3145
	    !netif_carrier_ok(dev))
3146 3147
		rt->fib6_nh.nh_flags |= RTNH_F_LINKDOWN;
	rt->fib6_nh.nh_flags |= (cfg->fc_flags & RTNH_F_ONLINK);
3148
	rt->fib6_nh.nh_dev = dev;
3149
	rt->fib6_table = table;
3150

3151
	cfg->fc_nlinfo.nl_net = dev_net(dev);
3152

D
David Ahern 已提交
3153 3154 3155
	if (idev)
		in6_dev_put(idev);

3156
	return rt;
3157 3158 3159 3160 3161 3162
out:
	if (dev)
		dev_put(dev);
	if (idev)
		in6_dev_put(idev);

3163
	fib6_info_release(rt);
3164
	return ERR_PTR(err);
3165 3166
}

3167
int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
3168
		  struct netlink_ext_ack *extack)
3169
{
3170
	struct fib6_info *rt;
3171 3172
	int err;

3173
	rt = ip6_route_info_create(cfg, gfp_flags, extack);
3174 3175
	if (IS_ERR(rt))
		return PTR_ERR(rt);
3176

3177
	err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
3178
	fib6_info_release(rt);
3179

L
Linus Torvalds 已提交
3180 3181 3182
	return err;
}

3183
static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
L
Linus Torvalds 已提交
3184
{
3185
	struct net *net = info->nl_net;
T
Thomas Graf 已提交
3186
	struct fib6_table *table;
3187
	int err;
L
Linus Torvalds 已提交
3188

D
David Ahern 已提交
3189
	if (rt == net->ipv6.fib6_null_entry) {
3190 3191 3192
		err = -ENOENT;
		goto out;
	}
3193

3194
	table = rt->fib6_table;
3195
	spin_lock_bh(&table->tb6_lock);
3196
	err = fib6_del(rt, info);
3197
	spin_unlock_bh(&table->tb6_lock);
L
Linus Torvalds 已提交
3198

3199
out:
3200
	fib6_info_release(rt);
L
Linus Torvalds 已提交
3201 3202 3203
	return err;
}

3204
int ip6_del_rt(struct net *net, struct fib6_info *rt)
3205
{
3206 3207
	struct nl_info info = { .nl_net = net };

3208
	return __ip6_del_rt(rt, &info);
3209 3210
}

3211
static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
3212 3213
{
	struct nl_info *info = &cfg->fc_nlinfo;
3214
	struct net *net = info->nl_net;
3215
	struct sk_buff *skb = NULL;
3216
	struct fib6_table *table;
3217
	int err = -ENOENT;
3218

D
David Ahern 已提交
3219
	if (rt == net->ipv6.fib6_null_entry)
3220
		goto out_put;
3221
	table = rt->fib6_table;
3222
	spin_lock_bh(&table->tb6_lock);
3223

3224
	if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
3225
		struct fib6_info *sibling, *next_sibling;
3226

3227 3228 3229 3230 3231
		/* prefer to send a single notification with all hops */
		skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
		if (skb) {
			u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;

3232
			if (rt6_fill_node(net, skb, rt, NULL,
3233 3234 3235 3236 3237 3238 3239 3240
					  NULL, NULL, 0, RTM_DELROUTE,
					  info->portid, seq, 0) < 0) {
				kfree_skb(skb);
				skb = NULL;
			} else
				info->skip_notify = 1;
		}

3241
		list_for_each_entry_safe(sibling, next_sibling,
3242 3243
					 &rt->fib6_siblings,
					 fib6_siblings) {
3244 3245
			err = fib6_del(sibling, info);
			if (err)
3246
				goto out_unlock;
3247 3248 3249 3250
		}
	}

	err = fib6_del(rt, info);
3251
out_unlock:
3252
	spin_unlock_bh(&table->tb6_lock);
3253
out_put:
3254
	fib6_info_release(rt);
3255 3256

	if (skb) {
3257
		rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3258 3259
			    info->nlh, gfp_any());
	}
3260 3261 3262
	return err;
}

3263 3264 3265 3266 3267 3268 3269 3270 3271 3272
static int ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
{
	int rc = -ESRCH;

	if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
		goto out;

	if (cfg->fc_flags & RTF_GATEWAY &&
	    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
		goto out;
3273 3274

	rc = rt6_remove_exception_rt(rt);
3275 3276 3277 3278
out:
	return rc;
}

3279 3280
static int ip6_route_del(struct fib6_config *cfg,
			 struct netlink_ext_ack *extack)
L
Linus Torvalds 已提交
3281
{
3282
	struct rt6_info *rt_cache;
T
Thomas Graf 已提交
3283
	struct fib6_table *table;
3284
	struct fib6_info *rt;
L
Linus Torvalds 已提交
3285 3286 3287
	struct fib6_node *fn;
	int err = -ESRCH;

3288
	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
3289 3290
	if (!table) {
		NL_SET_ERR_MSG(extack, "FIB table does not exist");
T
Thomas Graf 已提交
3291
		return err;
3292
	}
T
Thomas Graf 已提交
3293

3294
	rcu_read_lock();
L
Linus Torvalds 已提交
3295

T
Thomas Graf 已提交
3296
	fn = fib6_locate(&table->tb6_root,
3297
			 &cfg->fc_dst, cfg->fc_dst_len,
3298
			 &cfg->fc_src, cfg->fc_src_len,
3299
			 !(cfg->fc_flags & RTF_CACHE));
3300

L
Linus Torvalds 已提交
3301
	if (fn) {
3302
		for_each_fib6_node_rt_rcu(fn) {
3303
			if (cfg->fc_flags & RTF_CACHE) {
3304 3305
				int rc;

3306 3307
				rt_cache = rt6_find_cached_rt(rt, &cfg->fc_dst,
							      &cfg->fc_src);
3308 3309
				if (rt_cache) {
					rc = ip6_del_cached_rt(rt_cache, cfg);
3310 3311
					if (rc != -ESRCH) {
						rcu_read_unlock();
3312
						return rc;
3313
					}
3314 3315
				}
				continue;
3316
			}
3317
			if (cfg->fc_ifindex &&
3318 3319
			    (!rt->fib6_nh.nh_dev ||
			     rt->fib6_nh.nh_dev->ifindex != cfg->fc_ifindex))
L
Linus Torvalds 已提交
3320
				continue;
3321
			if (cfg->fc_flags & RTF_GATEWAY &&
3322
			    !ipv6_addr_equal(&cfg->fc_gateway, &rt->fib6_nh.nh_gw))
L
Linus Torvalds 已提交
3323
				continue;
3324
			if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
L
Linus Torvalds 已提交
3325
				continue;
3326
			if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol)
3327
				continue;
3328 3329
			if (!fib6_info_hold_safe(rt))
				continue;
3330
			rcu_read_unlock();
L
Linus Torvalds 已提交
3331

3332 3333 3334 3335 3336
			/* if gateway was specified only delete the one hop */
			if (cfg->fc_flags & RTF_GATEWAY)
				return __ip6_del_rt(rt, &cfg->fc_nlinfo);

			return __ip6_del_rt_siblings(rt, cfg);
L
Linus Torvalds 已提交
3337 3338
		}
	}
3339
	rcu_read_unlock();
L
Linus Torvalds 已提交
3340 3341 3342 3343

	return err;
}

3344
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
3345 3346
{
	struct netevent_redirect netevent;
3347 3348 3349 3350
	struct rt6_info *rt, *nrt = NULL;
	struct ndisc_options ndopts;
	struct inet6_dev *in6_dev;
	struct neighbour *neigh;
3351
	struct fib6_info *from;
3352
	struct rd_msg *msg;
3353 3354
	int optlen, on_link;
	u8 *lladdr;
3355

3356
	optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
3357
	optlen -= sizeof(*msg);
3358 3359

	if (optlen < 0) {
3360
		net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
3361 3362 3363
		return;
	}

3364
	msg = (struct rd_msg *)icmp6_hdr(skb);
3365

3366
	if (ipv6_addr_is_multicast(&msg->dest)) {
3367
		net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
3368 3369 3370
		return;
	}

3371
	on_link = 0;
3372
	if (ipv6_addr_equal(&msg->dest, &msg->target)) {
3373
		on_link = 1;
3374
	} else if (ipv6_addr_type(&msg->target) !=
3375
		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
3376
		net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390
		return;
	}

	in6_dev = __in6_dev_get(skb->dev);
	if (!in6_dev)
		return;
	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
		return;

	/* RFC2461 8.1:
	 *	The IP source address of the Redirect MUST be the same as the current
	 *	first-hop router for the specified ICMP Destination Address.
	 */

3391
	if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
3392 3393 3394
		net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
		return;
	}
3395 3396

	lladdr = NULL;
3397 3398 3399 3400 3401 3402 3403 3404 3405
	if (ndopts.nd_opts_tgt_lladdr) {
		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
					     skb->dev);
		if (!lladdr) {
			net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
			return;
		}
	}

3406
	rt = (struct rt6_info *) dst;
3407
	if (rt->rt6i_flags & RTF_REJECT) {
3408
		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
3409
		return;
3410
	}
3411

3412 3413 3414 3415
	/* Redirect received -> path was valid.
	 * Look, redirects are sent only in response to data packets,
	 * so that this nexthop apparently is reachable. --ANK
	 */
3416
	dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
3417

3418
	neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
3419 3420
	if (!neigh)
		return;
3421

L
Linus Torvalds 已提交
3422 3423 3424 3425
	/*
	 *	We have finally decided to accept it.
	 */

3426
	ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
L
Linus Torvalds 已提交
3427 3428 3429
		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
		     NEIGH_UPDATE_F_OVERRIDE|
		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
3430 3431
				     NEIGH_UPDATE_F_ISROUTER)),
		     NDISC_REDIRECT, &ndopts);
L
Linus Torvalds 已提交
3432

3433
	rcu_read_lock();
3434
	from = rcu_dereference(rt->from);
3435 3436 3437
	/* This fib6_info_hold() is safe here because we hold reference to rt
	 * and rt already holds reference to fib6_info.
	 */
3438
	fib6_info_hold(from);
3439
	rcu_read_unlock();
3440 3441

	nrt = ip6_rt_cache_alloc(from, &msg->dest, NULL);
3442
	if (!nrt)
L
Linus Torvalds 已提交
3443 3444 3445 3446 3447 3448
		goto out;

	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
	if (on_link)
		nrt->rt6i_flags &= ~RTF_GATEWAY;

A
Alexey Dobriyan 已提交
3449
	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
L
Linus Torvalds 已提交
3450

3451 3452 3453 3454
	/* No need to remove rt from the exception table if rt is
	 * a cached route because rt6_insert_exception() will
	 * takes care of it
	 */
3455
	if (rt6_insert_exception(nrt, from)) {
3456 3457 3458
		dst_release_immediate(&nrt->dst);
		goto out;
	}
L
Linus Torvalds 已提交
3459

3460 3461
	netevent.old = &rt->dst;
	netevent.new = &nrt->dst;
3462
	netevent.daddr = &msg->dest;
3463
	netevent.neigh = neigh;
3464 3465
	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);

L
Linus Torvalds 已提交
3466
out:
3467
	fib6_info_release(from);
3468
	neigh_release(neigh);
3469 3470
}

#ifdef CONFIG_IPV6_ROUTE_INFO
/* Look up an RA route-information route (RTF_ROUTEINFO|RTF_GATEWAY) for
 * @prefix via @gwaddr on @dev. Returns a held fib6_info or NULL.
 */
static struct fib6_info *rt6_get_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev)
{
	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
	int ifindex = dev->ifindex;
	struct fib6_node *fn;
	struct fib6_info *rt = NULL;
	struct fib6_table *table;

	table = fib6_get_table(net, tb_id);
	if (!table)
		return NULL;

	rcu_read_lock();
	fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
	if (!fn)
		goto out;

	for_each_fib6_node_rt_rcu(fn) {
		if (rt->fib6_nh.nh_dev->ifindex != ifindex)
			continue;
		if ((rt->fib6_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
			continue;
		if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr))
			continue;
		if (!fib6_info_hold_safe(rt))
			continue;
		break;
	}
out:
	rcu_read_unlock();
	return rt;
}

3508
static struct fib6_info *rt6_add_route_info(struct net *net,
3509
					   const struct in6_addr *prefix, int prefixlen,
3510 3511
					   const struct in6_addr *gwaddr,
					   struct net_device *dev,
3512
					   unsigned int pref)
3513
{
3514
	struct fib6_config cfg = {
3515
		.fc_metric	= IP6_RT_PRIO_USER,
3516
		.fc_ifindex	= dev->ifindex,
3517 3518 3519
		.fc_dst_len	= prefixlen,
		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
				  RTF_UP | RTF_PREF(pref),
3520
		.fc_protocol = RTPROT_RA,
3521
		.fc_type = RTN_UNICAST,
3522
		.fc_nlinfo.portid = 0,
3523 3524
		.fc_nlinfo.nlh = NULL,
		.fc_nlinfo.nl_net = net,
3525 3526
	};

3527
	cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
A
Alexey Dobriyan 已提交
3528 3529
	cfg.fc_dst = *prefix;
	cfg.fc_gateway = *gwaddr;
3530

3531 3532
	/* We should treat it as a default route if prefix length is 0. */
	if (!prefixlen)
3533
		cfg.fc_flags |= RTF_DEFAULT;
3534

3535
	ip6_route_add(&cfg, GFP_ATOMIC, NULL);
3536

3537
	return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
3538 3539 3540
}
#endif

3541
struct fib6_info *rt6_get_dflt_router(struct net *net,
3542 3543
				     const struct in6_addr *addr,
				     struct net_device *dev)
3544
{
3545
	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
3546
	struct fib6_info *rt;
T
Thomas Graf 已提交
3547
	struct fib6_table *table;
L
Linus Torvalds 已提交
3548

3549
	table = fib6_get_table(net, tb_id);
3550
	if (!table)
T
Thomas Graf 已提交
3551
		return NULL;
L
Linus Torvalds 已提交
3552

3553 3554
	rcu_read_lock();
	for_each_fib6_node_rt_rcu(&table->tb6_root) {
3555
		if (dev == rt->fib6_nh.nh_dev &&
3556
		    ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
3557
		    ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr))
L
Linus Torvalds 已提交
3558 3559
			break;
	}
3560 3561
	if (rt && !fib6_info_hold_safe(rt))
		rt = NULL;
3562
	rcu_read_unlock();
L
Linus Torvalds 已提交
3563 3564 3565
	return rt;
}

3566
struct fib6_info *rt6_add_dflt_router(struct net *net,
3567
				     const struct in6_addr *gwaddr,
3568 3569
				     struct net_device *dev,
				     unsigned int pref)
L
Linus Torvalds 已提交
3570
{
3571
	struct fib6_config cfg = {
D
David Ahern 已提交
3572
		.fc_table	= l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
3573
		.fc_metric	= IP6_RT_PRIO_USER,
3574 3575 3576
		.fc_ifindex	= dev->ifindex,
		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
				  RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
3577
		.fc_protocol = RTPROT_RA,
3578
		.fc_type = RTN_UNICAST,
3579
		.fc_nlinfo.portid = 0,
3580
		.fc_nlinfo.nlh = NULL,
3581
		.fc_nlinfo.nl_net = net,
3582
	};
L
Linus Torvalds 已提交
3583

A
Alexey Dobriyan 已提交
3584
	cfg.fc_gateway = *gwaddr;
L
Linus Torvalds 已提交
3585

3586
	if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
3587 3588 3589 3590 3591 3592
		struct fib6_table *table;

		table = fib6_get_table(dev_net(dev), cfg.fc_table);
		if (table)
			table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
	}
L
Linus Torvalds 已提交
3593

3594
	return rt6_get_dflt_router(net, gwaddr, dev);
L
Linus Torvalds 已提交
3595 3596
}

3597 3598
static void __rt6_purge_dflt_routers(struct net *net,
				     struct fib6_table *table)
L
Linus Torvalds 已提交
3599
{
3600
	struct fib6_info *rt;
L
Linus Torvalds 已提交
3601 3602

restart:
3603 3604
	rcu_read_lock();
	for_each_fib6_node_rt_rcu(&table->tb6_root) {
D
David Ahern 已提交
3605 3606 3607
		struct net_device *dev = fib6_info_nh_dev(rt);
		struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;

3608
		if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
3609 3610
		    (!idev || idev->cnf.accept_ra != 2) &&
		    fib6_info_hold_safe(rt)) {
3611 3612
			rcu_read_unlock();
			ip6_del_rt(net, rt);
L
Linus Torvalds 已提交
3613 3614 3615
			goto restart;
		}
	}
3616
	rcu_read_unlock();
3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632

	table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
}

/* Purge default-router routes from every fib6 table that advertises one. */
void rt6_purge_dflt_routers(struct net *net)
{
	struct fib6_table *table;
	struct hlist_head *head;
	unsigned int h;

	rcu_read_lock();

	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
		head = &net->ipv6.fib_table_hash[h];
		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
			if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
				__rt6_purge_dflt_routers(net, table);
		}
	}

	rcu_read_unlock();
}

3640 3641
static void rtmsg_to_fib6_config(struct net *net,
				 struct in6_rtmsg *rtmsg,
3642 3643 3644 3645
				 struct fib6_config *cfg)
{
	memset(cfg, 0, sizeof(*cfg));

D
David Ahern 已提交
3646 3647
	cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
			 : RT6_TABLE_MAIN;
3648 3649 3650 3651 3652 3653
	cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
	cfg->fc_metric = rtmsg->rtmsg_metric;
	cfg->fc_expires = rtmsg->rtmsg_info;
	cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
	cfg->fc_src_len = rtmsg->rtmsg_src_len;
	cfg->fc_flags = rtmsg->rtmsg_flags;
3654
	cfg->fc_type = rtmsg->rtmsg_type;
3655

3656
	cfg->fc_nlinfo.nl_net = net;
3657

A
Alexey Dobriyan 已提交
3658 3659 3660
	cfg->fc_dst = rtmsg->rtmsg_dst;
	cfg->fc_src = rtmsg->rtmsg_src;
	cfg->fc_gateway = rtmsg->rtmsg_gateway;
3661 3662
}

3663
int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
L
Linus Torvalds 已提交
3664
{
3665
	struct fib6_config cfg;
L
Linus Torvalds 已提交
3666 3667 3668
	struct in6_rtmsg rtmsg;
	int err;

3669
	switch (cmd) {
L
Linus Torvalds 已提交
3670 3671
	case SIOCADDRT:		/* Add a route */
	case SIOCDELRT:		/* Delete a route */
3672
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
L
Linus Torvalds 已提交
3673 3674 3675 3676 3677
			return -EPERM;
		err = copy_from_user(&rtmsg, arg,
				     sizeof(struct in6_rtmsg));
		if (err)
			return -EFAULT;
3678

3679
		rtmsg_to_fib6_config(net, &rtmsg, &cfg);
3680

L
Linus Torvalds 已提交
3681 3682 3683
		rtnl_lock();
		switch (cmd) {
		case SIOCADDRT:
3684
			err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
L
Linus Torvalds 已提交
3685 3686
			break;
		case SIOCDELRT:
3687
			err = ip6_route_del(&cfg, NULL);
L
Linus Torvalds 已提交
3688 3689 3690 3691 3692 3693 3694
			break;
		default:
			err = -EINVAL;
		}
		rtnl_unlock();

		return err;
3695
	}
L
Linus Torvalds 已提交
3696 3697 3698 3699 3700 3701 3702 3703

	return -EINVAL;
}

/*
 *	Drop the packet on the floor
 */

3704
static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
L
Linus Torvalds 已提交
3705
{
3706
	int type;
E
Eric Dumazet 已提交
3707
	struct dst_entry *dst = skb_dst(skb);
3708 3709
	switch (ipstats_mib_noroutes) {
	case IPSTATS_MIB_INNOROUTES:
3710
		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
U
Ulrich Weber 已提交
3711
		if (type == IPV6_ADDR_ANY) {
3712 3713
			IP6_INC_STATS(dev_net(dst->dev),
				      __in6_dev_get_safely(skb->dev),
3714
				      IPSTATS_MIB_INADDRERRORS);
3715 3716 3717 3718
			break;
		}
		/* FALLTHROUGH */
	case IPSTATS_MIB_OUTNOROUTES:
3719 3720
		IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
			      ipstats_mib_noroutes);
3721 3722
		break;
	}
3723
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
L
Linus Torvalds 已提交
3724 3725 3726 3727
	kfree_skb(skb);
	return 0;
}

3728 3729
static int ip6_pkt_discard(struct sk_buff *skb)
{
3730
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
3731 3732
}

E
Eric W. Biederman 已提交
3733
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
L
Linus Torvalds 已提交
3734
{
E
Eric Dumazet 已提交
3735
	skb->dev = skb_dst(skb)->dev;
3736
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
L
Linus Torvalds 已提交
3737 3738
}

3739 3740
static int ip6_pkt_prohibit(struct sk_buff *skb)
{
3741
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
3742 3743
}

E
Eric W. Biederman 已提交
3744
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
3745
{
E
Eric Dumazet 已提交
3746
	skb->dev = skb_dst(skb)->dev;
3747
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
3748 3749
}

L
Linus Torvalds 已提交
3750 3751 3752 3753
/*
 *	Allocate a dst for local (unicast / anycast) address.
 */

3754 3755 3756 3757
struct fib6_info *addrconf_f6i_alloc(struct net *net,
				     struct inet6_dev *idev,
				     const struct in6_addr *addr,
				     bool anycast, gfp_t gfp_flags)
L
Linus Torvalds 已提交
3758
{
D
David Ahern 已提交
3759
	u32 tb_id;
3760
	struct net_device *dev = idev->dev;
3761
	struct fib6_info *f6i;
3762

3763 3764
	f6i = fib6_info_alloc(gfp_flags);
	if (!f6i)
L
Linus Torvalds 已提交
3765 3766
		return ERR_PTR(-ENOMEM);

3767 3768 3769 3770
	f6i->dst_nocount = true;
	f6i->dst_host = true;
	f6i->fib6_protocol = RTPROT_KERNEL;
	f6i->fib6_flags = RTF_UP | RTF_NONEXTHOP;
3771
	if (anycast) {
3772 3773
		f6i->fib6_type = RTN_ANYCAST;
		f6i->fib6_flags |= RTF_ANYCAST;
3774
	} else {
3775 3776
		f6i->fib6_type = RTN_LOCAL;
		f6i->fib6_flags |= RTF_LOCAL;
3777
	}
L
Linus Torvalds 已提交
3778

3779
	f6i->fib6_nh.nh_gw = *addr;
3780
	dev_hold(dev);
3781 3782 3783
	f6i->fib6_nh.nh_dev = dev;
	f6i->fib6_dst.addr = *addr;
	f6i->fib6_dst.plen = 128;
D
David Ahern 已提交
3784
	tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
3785
	f6i->fib6_table = fib6_get_table(net, tb_id);
L
Linus Torvalds 已提交
3786

3787
	return f6i;
L
Linus Torvalds 已提交
3788 3789
}

3790 3791 3792 3793 3794 3795 3796
/* remove deleted ip from prefsrc entries */
struct arg_dev_net_ip {
	struct net_device *dev;	/* device being removed; NULL matches any */
	struct net *net;	/* owning network namespace */
	struct in6_addr *addr;	/* preferred-source address to clear */
};

3797
static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
3798 3799 3800 3801 3802
{
	struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
	struct net *net = ((struct arg_dev_net_ip *)arg)->net;
	struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;

3803
	if (((void *)rt->fib6_nh.nh_dev == dev || !dev) &&
D
David Ahern 已提交
3804
	    rt != net->ipv6.fib6_null_entry &&
3805
	    ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
3806
		spin_lock_bh(&rt6_exception_lock);
3807
		/* remove prefsrc entry */
3808
		rt->fib6_prefsrc.plen = 0;
3809 3810 3811
		/* need to update cache as well */
		rt6_exceptions_remove_prefsrc(rt);
		spin_unlock_bh(&rt6_exception_lock);
3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823
	}
	return 0;
}

void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
{
	struct net *net = dev_net(ifp->idev->dev);
	struct arg_dev_net_ip adni = {
		.dev = ifp->idev->dev,
		.net = net,
		.addr = &ifp->addr,
	};
3824
	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
3825 3826
}

3827 3828 3829
#define RTF_RA_ROUTER		(RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)

/* Remove routers and update dst entries when gateway turn into host. */
3830
static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
3831 3832 3833
{
	struct in6_addr *gateway = (struct in6_addr *)arg;

3834
	if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
3835
	    ipv6_addr_equal(gateway, &rt->fib6_nh.nh_gw)) {
3836 3837
		return -1;
	}
3838 3839 3840 3841 3842 3843 3844

	/* Further clean up cached routes in exception table.
	 * This is needed because cached route may have a different
	 * gateway than its 'parent' in the case of an ip redirect.
	 */
	rt6_exceptions_clean_tohost(rt, gateway);

3845 3846 3847 3848 3849 3850 3851 3852
	return 0;
}

/* Walk all fib6 tables and drop router routes whose gateway became a host. */
void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
{
	fib6_clean_all(net, fib6_clean_tohost, gateway);
}

/* Argument block passed to the fib6 walkers fib6_ifup()/fib6_ifdown(). */
struct arg_netdev_event {
	const struct net_device *dev;
	union {
		unsigned int nh_flags;	/* flags to clear, used by fib6_ifup() */
		unsigned long event;	/* netdev event, used by fib6_ifdown() */
	};
};

3861
static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
3862
{
3863
	struct fib6_info *iter;
3864 3865
	struct fib6_node *fn;

3866 3867
	fn = rcu_dereference_protected(rt->fib6_node,
			lockdep_is_held(&rt->fib6_table->tb6_lock));
3868
	iter = rcu_dereference_protected(fn->leaf,
3869
			lockdep_is_held(&rt->fib6_table->tb6_lock));
3870
	while (iter) {
3871
		if (iter->fib6_metric == rt->fib6_metric &&
3872
		    rt6_qualify_for_ecmp(iter))
3873
			return iter;
3874
		iter = rcu_dereference_protected(iter->fib6_next,
3875
				lockdep_is_held(&rt->fib6_table->tb6_lock));
3876 3877 3878 3879 3880
	}

	return NULL;
}

3881
static bool rt6_is_dead(const struct fib6_info *rt)
3882
{
3883 3884
	if (rt->fib6_nh.nh_flags & RTNH_F_DEAD ||
	    (rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN &&
D
David Ahern 已提交
3885
	     fib6_ignore_linkdown(rt)))
3886 3887 3888 3889 3890
		return true;

	return false;
}

3891
static int rt6_multipath_total_weight(const struct fib6_info *rt)
3892
{
3893
	struct fib6_info *iter;
3894 3895 3896
	int total = 0;

	if (!rt6_is_dead(rt))
3897
		total += rt->fib6_nh.nh_weight;
3898

3899
	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
3900
		if (!rt6_is_dead(iter))
3901
			total += iter->fib6_nh.nh_weight;
3902 3903 3904 3905 3906
	}

	return total;
}

3907
static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
3908 3909 3910 3911
{
	int upper_bound = -1;

	if (!rt6_is_dead(rt)) {
3912
		*weight += rt->fib6_nh.nh_weight;
3913 3914 3915
		upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
						    total) - 1;
	}
3916
	atomic_set(&rt->fib6_nh.nh_upper_bound, upper_bound);
3917 3918
}

3919
static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
3920
{
3921
	struct fib6_info *iter;
3922 3923 3924 3925
	int weight = 0;

	rt6_upper_bound_set(rt, &weight, total);

3926
	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
3927 3928 3929
		rt6_upper_bound_set(iter, &weight, total);
}

3930
void rt6_multipath_rebalance(struct fib6_info *rt)
3931
{
3932
	struct fib6_info *first;
3933 3934 3935 3936 3937 3938
	int total;

	/* In case the entire multipath route was marked for flushing,
	 * then there is no need to rebalance upon the removal of every
	 * sibling route.
	 */
3939
	if (!rt->fib6_nsiblings || rt->should_flush)
3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953
		return;

	/* During lookup routes are evaluated in order, so we need to
	 * make sure upper bounds are assigned from the first sibling
	 * onwards.
	 */
	first = rt6_multipath_first_sibling(rt);
	if (WARN_ON_ONCE(!first))
		return;

	total = rt6_multipath_total_weight(first);
	rt6_multipath_upper_bound_set(first, total);
}

3954
static int fib6_ifup(struct fib6_info *rt, void *p_arg)
3955 3956
{
	const struct arg_netdev_event *arg = p_arg;
3957
	struct net *net = dev_net(arg->dev);
3958

D
David Ahern 已提交
3959
	if (rt != net->ipv6.fib6_null_entry && rt->fib6_nh.nh_dev == arg->dev) {
3960
		rt->fib6_nh.nh_flags &= ~arg->nh_flags;
3961
		fib6_update_sernum_upto_root(net, rt);
3962
		rt6_multipath_rebalance(rt);
3963
	}
3964 3965 3966 3967 3968 3969 3970 3971

	return 0;
}

void rt6_sync_up(struct net_device *dev, unsigned int nh_flags)
{
	struct arg_netdev_event arg = {
		.dev = dev,
I
Ido Schimmel 已提交
3972 3973 3974
		{
			.nh_flags = nh_flags,
		},
3975 3976 3977 3978 3979 3980 3981 3982
	};

	if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
		arg.nh_flags |= RTNH_F_LINKDOWN;

	fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
}

3983
static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
3984 3985
				   const struct net_device *dev)
{
3986
	struct fib6_info *iter;
3987

3988
	if (rt->fib6_nh.nh_dev == dev)
3989
		return true;
3990
	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
3991
		if (iter->fib6_nh.nh_dev == dev)
3992 3993 3994 3995 3996
			return true;

	return false;
}

3997
static void rt6_multipath_flush(struct fib6_info *rt)
3998
{
3999
	struct fib6_info *iter;
4000 4001

	rt->should_flush = 1;
4002
	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4003 4004 4005
		iter->should_flush = 1;
}

4006
static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
4007 4008
					     const struct net_device *down_dev)
{
4009
	struct fib6_info *iter;
4010 4011
	unsigned int dead = 0;

4012 4013
	if (rt->fib6_nh.nh_dev == down_dev ||
	    rt->fib6_nh.nh_flags & RTNH_F_DEAD)
4014
		dead++;
4015
	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4016 4017
		if (iter->fib6_nh.nh_dev == down_dev ||
		    iter->fib6_nh.nh_flags & RTNH_F_DEAD)
4018 4019 4020 4021 4022
			dead++;

	return dead;
}

4023
static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
4024 4025 4026
				       const struct net_device *dev,
				       unsigned int nh_flags)
{
4027
	struct fib6_info *iter;
4028

4029 4030
	if (rt->fib6_nh.nh_dev == dev)
		rt->fib6_nh.nh_flags |= nh_flags;
4031
	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4032 4033
		if (iter->fib6_nh.nh_dev == dev)
			iter->fib6_nh.nh_flags |= nh_flags;
4034 4035
}

4036
/* called with write lock held for table with rt */
4037
static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
L
Linus Torvalds 已提交
4038
{
4039 4040
	const struct arg_netdev_event *arg = p_arg;
	const struct net_device *dev = arg->dev;
4041
	struct net *net = dev_net(dev);
4042

D
David Ahern 已提交
4043
	if (rt == net->ipv6.fib6_null_entry)
4044 4045 4046 4047
		return 0;

	switch (arg->event) {
	case NETDEV_UNREGISTER:
4048
		return rt->fib6_nh.nh_dev == dev ? -1 : 0;
4049
	case NETDEV_DOWN:
4050
		if (rt->should_flush)
4051
			return -1;
4052
		if (!rt->fib6_nsiblings)
4053
			return rt->fib6_nh.nh_dev == dev ? -1 : 0;
4054 4055 4056 4057
		if (rt6_multipath_uses_dev(rt, dev)) {
			unsigned int count;

			count = rt6_multipath_dead_count(rt, dev);
4058
			if (rt->fib6_nsiblings + 1 == count) {
4059 4060 4061 4062 4063
				rt6_multipath_flush(rt);
				return -1;
			}
			rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
						   RTNH_F_LINKDOWN);
4064
			fib6_update_sernum(net, rt);
4065
			rt6_multipath_rebalance(rt);
4066 4067
		}
		return -2;
4068
	case NETDEV_CHANGE:
4069
		if (rt->fib6_nh.nh_dev != dev ||
4070
		    rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
4071
			break;
4072
		rt->fib6_nh.nh_flags |= RTNH_F_LINKDOWN;
4073
		rt6_multipath_rebalance(rt);
4074
		break;
4075
	}
4076

L
Linus Torvalds 已提交
4077 4078 4079
	return 0;
}

4080
void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
L
Linus Torvalds 已提交
4081
{
4082
	struct arg_netdev_event arg = {
4083
		.dev = dev,
I
Ido Schimmel 已提交
4084 4085 4086
		{
			.event = event,
		},
4087 4088
	};

4089 4090 4091 4092 4093 4094 4095 4096
	fib6_clean_all(dev_net(dev), fib6_ifdown, &arg);
}

/* Full IPv6 teardown for @dev: sync routes, flush uncached dsts and
 * bring down the device's neighbour entries.
 */
void rt6_disable_ip(struct net_device *dev, unsigned long event)
{
	rt6_sync_down_dev(dev, event);
	rt6_uncached_list_flush_dev(dev_net(dev), dev);
	neigh_ifdown(&nd_tbl, dev);
}

/* Argument passed to the fib6 walker rt6_mtu_change_route(). */
struct rt6_mtu_change_arg {
	struct net_device *dev;	/* device whose MTU changed */
	unsigned int mtu;	/* new MTU value */
};

4104
/* fib6_clean_all() callback: propagate an administrative MTU change on
 * arg->dev into the route's RTAX_MTU metric and its cached PMTU
 * exceptions.  Always returns 0 (the route is never deleted).
 */
static int rt6_mtu_change_route(struct fib6_info *rt, void *p_arg)
{
	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
	struct inet6_dev *idev;

	/* In IPv6 pmtu discovery is not optional,
	   so that RTAX_MTU lock cannot disable it.
	   We still use this lock to block changes
	   caused by addrconf/ndisc.
	*/

	idev = __in6_dev_get(arg->dev);
	if (!idev)
		return 0;

	/* For administrative MTU increase, there is no way to discover
	   IPv6 PMTU increase, so PMTU increase should be updated here.
	   Since RFC 1981 doesn't include administrative MTU increase
	   update PMTU increase is a MUST. (i.e. jumbo frame)
	 */
	if (rt->fib6_nh.nh_dev == arg->dev &&
	    !fib6_metric_locked(rt, RTAX_MTU)) {
		u32 mtu = rt->fib6_pmtu;

		/* update when shrinking, or when growing from the old
		 * device MTU (i.e. the route was tracking the device)
		 */
		if (mtu >= arg->mtu ||
		    (mtu < arg->mtu && mtu == idev->cnf.mtu6))
			fib6_metric_set(rt, RTAX_MTU, arg->mtu);

		/* cached exception routes carry their own PMTU state */
		spin_lock_bh(&rt6_exception_lock);
		rt6_exceptions_update_pmtu(idev, rt, arg->mtu);
		spin_unlock_bh(&rt6_exception_lock);
	}
	return 0;
}

4139
/* Entry point for device-MTU changes: walk all FIB tables and apply
 * rt6_mtu_change_route() to every route.
 */
void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
{
	struct rt6_mtu_change_arg arg = {
		.dev = dev,
		.mtu = mtu,
	};

	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
}

4149
/* Netlink attribute validation policy for IPv6 RTM_{NEW,DEL,GET}ROUTE
 * requests; consumed by nlmsg_parse() in rtm_to_fib6_config() and
 * inet6_rtm_getroute().
 */
static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
	[RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
	[RTA_PREFSRC]		= { .len = sizeof(struct in6_addr) },
	[RTA_OIF]               = { .type = NLA_U32 },
	[RTA_IIF]		= { .type = NLA_U32 },
	[RTA_PRIORITY]          = { .type = NLA_U32 },
	[RTA_METRICS]           = { .type = NLA_NESTED },
	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
	[RTA_PREF]              = { .type = NLA_U8 },
	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[RTA_ENCAP]		= { .type = NLA_NESTED },
	[RTA_EXPIRES]		= { .type = NLA_U32 },
	[RTA_UID]		= { .type = NLA_U32 },
	[RTA_MARK]		= { .type = NLA_U32 },
	[RTA_TABLE]		= { .type = NLA_U32 },
	[RTA_IP_PROTO]		= { .type = NLA_U8 },
	[RTA_SPORT]		= { .type = NLA_U16 },
	[RTA_DPORT]		= { .type = NLA_U16 },
};

/* Translate an RTM_NEWROUTE/RTM_DELROUTE netlink message into a
 * struct fib6_config.
 *
 * @skb:    the netlink request skb (source of portid / net namespace)
 * @nlh:    the netlink message header to parse
 * @cfg:    output; zeroed and filled in here
 * @extack: extended-ack for error reporting to userspace
 *
 * Returns 0 on success or a negative errno (e.g. -EINVAL for malformed
 * attributes or unsupported RTA_VIA).
 */
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct fib6_config *cfg,
			      struct netlink_ext_ack *extack)
{
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	unsigned int pref;
	int err;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
			  NULL);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	rtm = nlmsg_data(nlh);
	memset(cfg, 0, sizeof(*cfg));

	cfg->fc_table = rtm->rtm_table;
	cfg->fc_dst_len = rtm->rtm_dst_len;
	cfg->fc_src_len = rtm->rtm_src_len;
	cfg->fc_flags = RTF_UP;
	cfg->fc_protocol = rtm->rtm_protocol;
	cfg->fc_type = rtm->rtm_type;

	/* route types that discard packets are installed as reject routes */
	if (rtm->rtm_type == RTN_UNREACHABLE ||
	    rtm->rtm_type == RTN_BLACKHOLE ||
	    rtm->rtm_type == RTN_PROHIBIT ||
	    rtm->rtm_type == RTN_THROW)
		cfg->fc_flags |= RTF_REJECT;

	if (rtm->rtm_type == RTN_LOCAL)
		cfg->fc_flags |= RTF_LOCAL;

	if (rtm->rtm_flags & RTM_F_CLONED)
		cfg->fc_flags |= RTF_CACHE;

	cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);

	cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
	cfg->fc_nlinfo.nlh = nlh;
	cfg->fc_nlinfo.nl_net = sock_net(skb->sk);

	if (tb[RTA_GATEWAY]) {
		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
		cfg->fc_flags |= RTF_GATEWAY;
	}
	if (tb[RTA_VIA]) {
		NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
		goto errout;
	}

	if (tb[RTA_DST]) {
		/* prefix attribute may be truncated to prefix-len bytes */
		int plen = (rtm->rtm_dst_len + 7) >> 3;

		if (nla_len(tb[RTA_DST]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
	}

	if (tb[RTA_SRC]) {
		int plen = (rtm->rtm_src_len + 7) >> 3;

		if (nla_len(tb[RTA_SRC]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
	}

	if (tb[RTA_PREFSRC])
		cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);

	if (tb[RTA_OIF])
		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_PRIORITY])
		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);

	if (tb[RTA_METRICS]) {
		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
	}

	/* RTA_TABLE overrides the (8-bit) rtm_table field */
	if (tb[RTA_TABLE])
		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);

	if (tb[RTA_MULTIPATH]) {
		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);

		err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
						     cfg->fc_mp_len, extack);
		if (err < 0)
			goto errout;
	}

	if (tb[RTA_PREF]) {
		/* unknown preference values fall back to MEDIUM */
		pref = nla_get_u8(tb[RTA_PREF]);
		if (pref != ICMPV6_ROUTER_PREF_LOW &&
		    pref != ICMPV6_ROUTER_PREF_HIGH)
			pref = ICMPV6_ROUTER_PREF_MEDIUM;
		cfg->fc_flags |= RTF_PREF(pref);
	}

	if (tb[RTA_ENCAP])
		cfg->fc_encap = tb[RTA_ENCAP];

	if (tb[RTA_ENCAP_TYPE]) {
		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);

		err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
		if (err < 0)
			goto errout;
	}

	if (tb[RTA_EXPIRES]) {
		unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);

		if (addrconf_finite_timeout(timeout)) {
			cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
			cfg->fc_flags |= RTF_EXPIRES;
		}
	}

	err = 0;
errout:
	return err;
}

4299
/* One pending nexthop while building/tearing down a multipath route;
 * linked onto the local rt6_nh_list in ip6_route_multipath_add().
 */
struct rt6_nh {
	struct fib6_info *fib6_info;	/* route created for this nexthop */
	struct fib6_config r_cfg;	/* per-nexthop copy of the config */
	struct list_head next;
};

static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
{
	struct rt6_nh *nh;

	list_for_each_entry(nh, rt6_nh_list, next) {
4310
		pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
4311 4312 4313 4314 4315
		        &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
		        nh->r_cfg.fc_ifindex);
	}
}

4316 4317
static int ip6_route_info_append(struct net *net,
				 struct list_head *rt6_nh_list,
4318 4319
				 struct fib6_info *rt,
				 struct fib6_config *r_cfg)
4320 4321 4322 4323 4324
{
	struct rt6_nh *nh;
	int err = -EEXIST;

	list_for_each_entry(nh, rt6_nh_list, next) {
4325 4326
		/* check if fib6_info already exists */
		if (rt6_duplicate_nexthop(nh->fib6_info, rt))
4327 4328 4329 4330 4331 4332
			return err;
	}

	nh = kzalloc(sizeof(*nh), GFP_KERNEL);
	if (!nh)
		return -ENOMEM;
4333
	nh->fib6_info = rt;
4334 4335 4336 4337 4338 4339
	memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
	list_add_tail(&nh->next, rt6_nh_list);

	return 0;
}

4340 4341
/* Send the single RTM_NEWROUTE notification for a multipath add/replace/
 * append operation.
 *
 * For an APPEND, @rt is the first route inserted and @rt_last the last;
 * userspace expects the dump to start at the first nexthop, and since
 * siblings are appended at the tail of the list, the first sibling of
 * @rt_last is the proper notification anchor.
 */
static void ip6_route_mpath_notify(struct fib6_info *rt,
				   struct fib6_info *rt_last,
				   struct nl_info *info,
				   __u16 nlflags)
{
	bool use_first_sibling = (nlflags & NLM_F_APPEND) && rt_last &&
				 rt_last->fib6_nsiblings;

	if (use_first_sibling)
		rt = list_first_entry(&rt_last->fib6_siblings,
				      struct fib6_info,
				      fib6_siblings);

	if (rt)
		inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
}

4361 4362
/* Add (or replace/append) a multipath route described by cfg->fc_mp.
 *
 * Two phases:
 *  1. parse each rtnexthop and build a list of fib6_info candidates;
 *  2. insert them one by one, suppressing per-route notifications and
 *     sending one combined notification at the end.
 * On mid-insertion failure, already-inserted routes are deleted again so
 * userspace sees a coherent add/delete notification stream.
 */
static int ip6_route_multipath_add(struct fib6_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct fib6_info *rt_notif = NULL, *rt_last = NULL;
	struct nl_info *info = &cfg->fc_nlinfo;
	struct fib6_config r_cfg;
	struct rtnexthop *rtnh;
	struct fib6_info *rt;
	struct rt6_nh *err_nh;
	struct rt6_nh *nh, *nh_safe;
	__u16 nlflags;
	int remaining;
	int attrlen;
	int err = 1;
	int nhn = 0;
	int replace = (cfg->fc_nlinfo.nlh &&
		       (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
	LIST_HEAD(rt6_nh_list);

	nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
	if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
		nlflags |= NLM_F_APPEND;

	remaining = cfg->fc_mp_len;
	rtnh = (struct rtnexthop *)cfg->fc_mp;

	/* Parse a Multipath Entry and build a list (rt6_nh_list) of
	 * fib6_info structs per nexthop
	 */
	while (rtnh_ok(rtnh, remaining)) {
		/* per-nexthop config starts as a copy of the base config */
		memcpy(&r_cfg, cfg, sizeof(*cfg));
		if (rtnh->rtnh_ifindex)
			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla) {
				r_cfg.fc_gateway = nla_get_in6_addr(nla);
				r_cfg.fc_flags |= RTF_GATEWAY;
			}
			r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
			nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
			if (nla)
				r_cfg.fc_encap_type = nla_get_u16(nla);
		}

		r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
		rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			goto cleanup;
		}
		if (!rt6_qualify_for_ecmp(rt)) {
			err = -EINVAL;
			NL_SET_ERR_MSG(extack,
				       "Device only routes can not be added for IPv6 using the multipath API.");
			fib6_info_release(rt);
			goto cleanup;
		}

		/* rtnh_hops is weight - 1 on the wire */
		rt->fib6_nh.nh_weight = rtnh->rtnh_hops + 1;

		err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
					    rt, &r_cfg);
		if (err) {
			/* list did not take ownership; drop our reference */
			fib6_info_release(rt);
			goto cleanup;
		}

		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* for add and replace send one notification with all nexthops.
	 * Skip the notification in fib6_add_rt2node and send one with
	 * the full route when done
	 */
	info->skip_notify = 1;

	err_nh = NULL;
	list_for_each_entry(nh, &rt6_nh_list, next) {
		err = __ip6_ins_rt(nh->fib6_info, info, extack);
		fib6_info_release(nh->fib6_info);

		if (!err) {
			/* save reference to last route successfully inserted */
			rt_last = nh->fib6_info;

			/* save reference to first route for notification */
			if (!rt_notif)
				rt_notif = nh->fib6_info;
		}

		/* nh->fib6_info is used or freed at this point, reset to NULL*/
		nh->fib6_info = NULL;
		if (err) {
			if (replace && nhn)
				ip6_print_replace_route_err(&rt6_nh_list);
			err_nh = nh;
			goto add_errout;
		}

		/* Because each route is added like a single route we remove
		 * these flags after the first nexthop: if there is a collision,
		 * we have already failed to add the first nexthop:
		 * fib6_add_rt2node() has rejected it; when replacing, old
		 * nexthops have been replaced by first new, the rest should
		 * be added to it.
		 */
		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
						     NLM_F_REPLACE);
		nhn++;
	}

	/* success ... tell user about new route */
	ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
	goto cleanup;

add_errout:
	/* send notification for routes that were added so that
	 * the delete notifications sent by ip6_route_del are
	 * coherent
	 */
	if (rt_notif)
		ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);

	/* Delete routes that were already added */
	list_for_each_entry(nh, &rt6_nh_list, next) {
		if (err_nh == nh)
			break;
		ip6_route_del(&nh->r_cfg, extack);
	}

cleanup:
	list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
		/* fib6_info is non-NULL only for entries never inserted */
		if (nh->fib6_info)
			fib6_info_release(nh->fib6_info);
		list_del(&nh->next);
		kfree(nh);
	}

	return err;
}

4508 4509
/* Delete every nexthop listed in cfg->fc_mp.  All entries are attempted
 * even after a failure; the last error encountered is returned (0 if
 * every delete succeeded).
 */
static int ip6_route_multipath_del(struct fib6_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct fib6_config r_cfg;
	struct rtnexthop *rtnh;
	int remaining;
	int attrlen;
	int err = 1, last_err = 0;

	remaining = cfg->fc_mp_len;
	rtnh = (struct rtnexthop *)cfg->fc_mp;

	/* Parse a Multipath Entry */
	while (rtnh_ok(rtnh, remaining)) {
		memcpy(&r_cfg, cfg, sizeof(*cfg));
		if (rtnh->rtnh_ifindex)
			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla) {
				nla_memcpy(&r_cfg.fc_gateway, nla, 16);
				r_cfg.fc_flags |= RTF_GATEWAY;
			}
		}
		err = ip6_route_del(&r_cfg, extack);
		if (err)
			last_err = err;

		rtnh = rtnh_next(rtnh, &remaining);
	}

	return last_err;
}

4546 4547
static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
L
Linus Torvalds 已提交
4548
{
4549 4550
	struct fib6_config cfg;
	int err;
L
Linus Torvalds 已提交
4551

4552
	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
4553 4554 4555
	if (err < 0)
		return err;

4556
	if (cfg.fc_mp)
4557
		return ip6_route_multipath_del(&cfg, extack);
4558 4559
	else {
		cfg.fc_delete_all_nh = 1;
4560
		return ip6_route_del(&cfg, extack);
4561
	}
L
Linus Torvalds 已提交
4562 4563
}

4564 4565
static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
L
Linus Torvalds 已提交
4566
{
4567 4568
	struct fib6_config cfg;
	int err;
L
Linus Torvalds 已提交
4569

4570
	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
4571 4572 4573
	if (err < 0)
		return err;

4574
	if (cfg.fc_mp)
4575
		return ip6_route_multipath_add(&cfg, extack);
4576
	else
4577
		return ip6_route_add(&cfg, GFP_KERNEL, extack);
L
Linus Torvalds 已提交
4578 4579
}

4580
/* Upper-bound size of the netlink notification built by rt6_fill_node()
 * for @rt.  Must not under-estimate: inet6_rt_notify() treats -EMSGSIZE
 * from the fill as a bug in this accounting.
 */
static size_t rt6_nlmsg_size(struct fib6_info *rt)
{
	int nexthop_len = 0;

	if (rt->fib6_nsiblings) {
		/* per-sibling cost inside RTA_MULTIPATH */
		nexthop_len = nla_total_size(0)	 /* RTA_MULTIPATH */
			    + NLA_ALIGN(sizeof(struct rtnexthop))
			    + nla_total_size(16) /* RTA_GATEWAY */
			    + lwtunnel_get_encap_size(rt->fib6_nh.nh_lwtstate);

		nexthop_len *= rt->fib6_nsiblings;
	}

	return NLMSG_ALIGN(sizeof(struct rtmsg))
	       + nla_total_size(16) /* RTA_SRC */
	       + nla_total_size(16) /* RTA_DST */
	       + nla_total_size(16) /* RTA_GATEWAY */
	       + nla_total_size(16) /* RTA_PREFSRC */
	       + nla_total_size(4) /* RTA_TABLE */
	       + nla_total_size(4) /* RTA_IIF */
	       + nla_total_size(4) /* RTA_OIF */
	       + nla_total_size(4) /* RTA_PRIORITY */
	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
	       + nla_total_size(sizeof(struct rta_cacheinfo))
	       + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
	       + nla_total_size(1) /* RTA_PREF */
	       + lwtunnel_get_encap_size(rt->fib6_nh.nh_lwtstate)
	       + nexthop_len;
}

4610
/* Emit nexthop attributes (gateway, oif, lwtunnel encap) for @rt into
 * @skb and accumulate RTNH_F_* status bits into *@flags.
 *
 * @skip_oif: true when called from the RTA_MULTIPATH path, where the
 *            ifindex lives in the rtnexthop struct instead of RTA_OIF.
 * Returns 0 or -EMSGSIZE if the skb ran out of room.
 */
static int rt6_nexthop_info(struct sk_buff *skb, struct fib6_info *rt,
			    unsigned int *flags, bool skip_oif)
{
	if (rt->fib6_nh.nh_flags & RTNH_F_DEAD)
		*flags |= RTNH_F_DEAD;

	if (rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN) {
		*flags |= RTNH_F_LINKDOWN;

		/* report DEAD too when the device ignores linkdown routes */
		rcu_read_lock();
		if (fib6_ignore_linkdown(rt))
			*flags |= RTNH_F_DEAD;
		rcu_read_unlock();
	}

	if (rt->fib6_flags & RTF_GATEWAY) {
		if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->fib6_nh.nh_gw) < 0)
			goto nla_put_failure;
	}

	*flags |= (rt->fib6_nh.nh_flags & RTNH_F_ONLINK);
	if (rt->fib6_nh.nh_flags & RTNH_F_OFFLOAD)
		*flags |= RTNH_F_OFFLOAD;

	/* not needed for multipath encoding b/c it has a rtnexthop struct */
	if (!skip_oif && rt->fib6_nh.nh_dev &&
	    nla_put_u32(skb, RTA_OIF, rt->fib6_nh.nh_dev->ifindex))
		goto nla_put_failure;

	if (rt->fib6_nh.nh_lwtstate &&
	    lwtunnel_fill_encap(skb, rt->fib6_nh.nh_lwtstate) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

4649
/* add multipath next hop */
static int rt6_add_nexthop(struct sk_buff *skb, struct fib6_info *rt)
{
	/* Append one rtnexthop record (plus its nested attributes) inside
	 * an open RTA_MULTIPATH nest.  Returns 0 or -EMSGSIZE.
	 */
	const struct net_device *dev = rt->fib6_nh.nh_dev;
	struct rtnexthop *rtnh;
	unsigned int flags = 0;

	rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
	if (!rtnh)
		goto nla_put_failure;

	/* on-wire hops field carries weight - 1 */
	rtnh->rtnh_hops = rt->fib6_nh.nh_weight - 1;
	rtnh->rtnh_ifindex = dev ? dev->ifindex : 0;

	if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
		goto nla_put_failure;

	rtnh->rtnh_flags = flags;

	/* length of rtnetlink header + attributes */
	rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

4677
/* Build one RTM_NEWROUTE message for @rt into @skb.
 *
 * @dst:  optional dst_entry; when non-NULL the message describes the
 *        cached rt6_info (dest/src keys, metrics, expiry come from it).
 * @dest/@src: optional concrete addresses for route-get replies; when
 *        set, the prefix lengths are reported as /128.
 * @iif:  input ifindex for route-get replies (0 for dumps/notifications).
 * Returns 0, or -EMSGSIZE and cancels the partial message.
 */
static int rt6_fill_node(struct net *net, struct sk_buff *skb,
			 struct fib6_info *rt, struct dst_entry *dst,
			 struct in6_addr *dest, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags)
{
	struct rt6_info *rt6 = (struct rt6_info *)dst;
	struct rt6key *rt6_dst, *rt6_src;
	u32 *pmetrics, table, rt6_flags;
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	long expires = 0;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	/* keys/flags come from the cached clone when one is given */
	if (rt6) {
		rt6_dst = &rt6->rt6i_dst;
		rt6_src = &rt6->rt6i_src;
		rt6_flags = rt6->rt6i_flags;
	} else {
		rt6_dst = &rt->fib6_dst;
		rt6_src = &rt->fib6_src;
		rt6_flags = rt->fib6_flags;
	}

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET6;
	rtm->rtm_dst_len = rt6_dst->plen;
	rtm->rtm_src_len = rt6_src->plen;
	rtm->rtm_tos = 0;
	if (rt->fib6_table)
		table = rt->fib6_table->tb6_id;
	else
		table = RT6_TABLE_UNSPEC;
	/* legacy 8-bit field saturates at RT_TABLE_COMPAT */
	rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, table))
		goto nla_put_failure;

	rtm->rtm_type = rt->fib6_type;
	rtm->rtm_flags = 0;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	rtm->rtm_protocol = rt->fib6_protocol;

	if (rt6_flags & RTF_CACHE)
		rtm->rtm_flags |= RTM_F_CLONED;

	if (dest) {
		if (nla_put_in6_addr(skb, RTA_DST, dest))
			goto nla_put_failure;
		rtm->rtm_dst_len = 128;
	} else if (rtm->rtm_dst_len)
		if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
			goto nla_put_failure;
#ifdef CONFIG_IPV6_SUBTREES
	if (src) {
		if (nla_put_in6_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
		rtm->rtm_src_len = 128;
	} else if (rtm->rtm_src_len &&
		   nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
		goto nla_put_failure;
#endif
	if (iif) {
#ifdef CONFIG_IPV6_MROUTE
		if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
			int err = ip6mr_get_route(net, skb, rtm, portid);

			/* ip6mr owns the reply: 0 = fully handled */
			if (err == 0)
				return 0;
			if (err < 0)
				goto nla_put_failure;
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, iif))
				goto nla_put_failure;
	} else if (dest) {
		struct in6_addr saddr_buf;
		if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
		    nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	if (rt->fib6_prefsrc.plen) {
		struct in6_addr saddr_buf;
		saddr_buf = rt->fib6_prefsrc.addr;
		if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
	if (rtnetlink_put_metrics(skb, pmetrics) < 0)
		goto nla_put_failure;

	if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
		goto nla_put_failure;

	/* For multipath routes, walk the siblings list and add
	 * each as a nexthop within RTA_MULTIPATH.
	 */
	if (rt6) {
		if (rt6_flags & RTF_GATEWAY &&
		    nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
			goto nla_put_failure;

		if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
			goto nla_put_failure;
	} else if (rt->fib6_nsiblings) {
		struct fib6_info *sibling, *next_sibling;
		struct nlattr *mp;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (!mp)
			goto nla_put_failure;

		if (rt6_add_nexthop(skb, rt) < 0)
			goto nla_put_failure;

		list_for_each_entry_safe(sibling, next_sibling,
					 &rt->fib6_siblings, fib6_siblings) {
			if (rt6_add_nexthop(skb, sibling) < 0)
				goto nla_put_failure;
		}

		nla_nest_end(skb, mp);
	} else {
		if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
			goto nla_put_failure;
	}

	if (rt6_flags & RTF_EXPIRES) {
		expires = dst ? dst->expires : rt->expires;
		expires -= jiffies;
	}

	if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
		goto nla_put_failure;

	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
		goto nla_put_failure;


	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

4828
/* fib6 dump callback: emit one route into the dump skb.
 * Returns 0/1 to continue the walk (1 = skipped on purpose) or the
 * negative result of rt6_fill_node() to stop it.
 */
int rt6_dump_route(struct fib6_info *rt, void *p_arg)
{
	struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
	struct net *net = arg->net;

	if (rt == net->ipv6.fib6_null_entry)
		return 0;

	if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
		struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);

		/* user wants prefix routes only */
		if (rtm->rtm_flags & RTM_F_PREFIX &&
		    !(rt->fib6_flags & RTF_PREFIX_RT)) {
			/* success since this is not a prefix route */
			return 1;
		}
	}

	return rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL, 0,
			     RTM_NEWROUTE, NETLINK_CB(arg->cb->skb).portid,
			     arg->cb->nlh->nlmsg_seq, NLM_F_MULTI);
}

4852 4853
/* RTM_GETROUTE handler: resolve the route for the flow described by the
 * request and unicast the answer back to the requester.
 *
 * With RTA_IIF set, an input-path lookup on that device is done;
 * otherwise an output lookup.  RTM_F_FIB_MATCH reports the matching FIB
 * entry instead of the resolved (cached) route.
 */
static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX+1];
	int err, iif = 0, oif = 0;
	struct fib6_info *from;
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi6 fl6;
	bool fibmatch;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
			  extack);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	memset(&fl6, 0, sizeof(fl6));
	rtm = nlmsg_data(nlh);
	fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
	fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);

	if (tb[RTA_SRC]) {
		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
			goto errout;

		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
	}

	if (tb[RTA_DST]) {
		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
			goto errout;

		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
	}

	if (tb[RTA_IIF])
		iif = nla_get_u32(tb[RTA_IIF]);

	if (tb[RTA_OIF])
		oif = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_MARK])
		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);

	if (tb[RTA_UID])
		fl6.flowi6_uid = make_kuid(current_user_ns(),
					   nla_get_u32(tb[RTA_UID]));
	else
		fl6.flowi6_uid = iif ? INVALID_UID : current_uid();

	if (tb[RTA_SPORT])
		fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);

	if (tb[RTA_DPORT])
		fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);

	if (tb[RTA_IP_PROTO]) {
		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
						  &fl6.flowi6_proto, AF_INET6,
						  extack);
		if (err)
			goto errout;
	}

	if (iif) {
		struct net_device *dev;
		int flags = 0;

		rcu_read_lock();

		dev = dev_get_by_index_rcu(net, iif);
		if (!dev) {
			rcu_read_unlock();
			err = -ENODEV;
			goto errout;
		}

		fl6.flowi6_iif = iif;

		if (!ipv6_addr_any(&fl6.saddr))
			flags |= RT6_LOOKUP_F_HAS_SADDR;

		dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);

		rcu_read_unlock();
	} else {
		fl6.flowi6_oif = oif;

		dst = ip6_route_output(net, NULL, &fl6);
	}


	rt = container_of(dst, struct rt6_info, dst);
	if (rt->dst.error) {
		err = rt->dst.error;
		ip6_rt_put(rt);
		goto errout;
	}

	if (rt == net->ipv6.ip6_null_entry) {
		err = rt->dst.error;
		ip6_rt_put(rt);
		goto errout;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		ip6_rt_put(rt);
		err = -ENOBUFS;
		goto errout;
	}

	/* skb takes over the dst reference obtained above */
	skb_dst_set(skb, &rt->dst);

	/* rt->from is RCU-protected; hold the read lock across the fill */
	rcu_read_lock();
	from = rcu_dereference(rt->from);

	if (fibmatch)
		err = rt6_fill_node(net, skb, from, NULL, NULL, NULL, iif,
				    RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, 0);
	else
		err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
				    &fl6.saddr, iif, RTM_NEWROUTE,
				    NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
				    0);
	rcu_read_unlock();

	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;
}

4994
/* Broadcast an RTM_NEWROUTE/RTM_DELROUTE notification for @rt to the
 * RTNLGRP_IPV6_ROUTE multicast group; on failure report via
 * rtnl_set_sk_err() so listeners see the loss.
 */
void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
		     unsigned int nlm_flags)
{
	struct sk_buff *skb;
	struct net *net = info->nl_net;
	u32 seq;
	int err;

	err = -ENOBUFS;
	seq = info->nlh ? info->nlh->nlmsg_seq : 0;

	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
	if (!skb)
		goto errout;

	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
			    event, info->portid, seq, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}

5025
/* Netdev notifier: bind the per-netns special routes (null / prohibit /
 * blackhole entries) to the loopback device when it registers, and drop
 * those references when it unregisters.  Only loopback is of interest.
 */
static int ip6_route_dev_notify(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	if (!(dev->flags & IFF_LOOPBACK))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
		net->ipv6.fib6_null_entry->fib6_nh.nh_dev = dev;
		net->ipv6.ip6_null_entry->dst.dev = dev;
		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
#endif
	} else if (event == NETDEV_UNREGISTER &&
		   dev->reg_state != NETREG_UNREGISTERED) {
		/* NETDEV_UNREGISTER could be fired for multiple times by
		 * netdev_wait_allrefs(). Make sure we only call this once.
		 */
		in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
		in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
#endif
	}

	return NOTIFY_OK;
}

L
Linus Torvalds 已提交
5059 5060 5061 5062 5063 5064 5065
/*
 *	/proc
 */

#ifdef CONFIG_PROC_FS
static int rt6_stats_seq_show(struct seq_file *seq, void *v)
{
5066
	struct net *net = (struct net *)seq->private;
L
Linus Torvalds 已提交
5067
	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
5068 5069
		   net->ipv6.rt6_stats->fib_nodes,
		   net->ipv6.rt6_stats->fib_route_nodes,
W
Wei Wang 已提交
5070
		   atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
5071 5072
		   net->ipv6.rt6_stats->fib_rt_entries,
		   net->ipv6.rt6_stats->fib_rt_cache,
5073
		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
5074
		   net->ipv6.rt6_stats->fib_discarded_routes);
L
Linus Torvalds 已提交
5075 5076 5077 5078 5079 5080 5081 5082

	return 0;
}
#endif	/* CONFIG_PROC_FS */

#ifdef CONFIG_SYSCTL

/* sysctl handler for net.ipv6.route.flush: writing a delay value
 * triggers an immediate fib6 garbage-collection run.  Reads are not
 * supported (write-only sysctl).
 */
static
int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
			      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net;
	int delay;
	if (!write)
		return -EINVAL;

	net = (struct net *)ctl->extra1;
	delay = net->ipv6.sysctl.flush_delay;
	proc_dointvec(ctl, write, buffer, lenp, ppos);
	/* delay <= 0 forces an immediate, non-expiring flush */
	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
	return 0;
}

5098
struct ctl_table ipv6_route_table_template[] = {
5099
	{
L
Linus Torvalds 已提交
5100
		.procname	=	"flush",
5101
		.data		=	&init_net.ipv6.sysctl.flush_delay,
L
Linus Torvalds 已提交
5102
		.maxlen		=	sizeof(int),
5103
		.mode		=	0200,
A
Alexey Dobriyan 已提交
5104
		.proc_handler	=	ipv6_sysctl_rtcache_flush
L
Linus Torvalds 已提交
5105 5106 5107
	},
	{
		.procname	=	"gc_thresh",
5108
		.data		=	&ip6_dst_ops_template.gc_thresh,
L
Linus Torvalds 已提交
5109 5110
		.maxlen		=	sizeof(int),
		.mode		=	0644,
A
Alexey Dobriyan 已提交
5111
		.proc_handler	=	proc_dointvec,
L
Linus Torvalds 已提交
5112 5113 5114
	},
	{
		.procname	=	"max_size",
5115
		.data		=	&init_net.ipv6.sysctl.ip6_rt_max_size,
L
Linus Torvalds 已提交
5116 5117
		.maxlen		=	sizeof(int),
		.mode		=	0644,
A
Alexey Dobriyan 已提交
5118
		.proc_handler	=	proc_dointvec,
L
Linus Torvalds 已提交
5119 5120 5121
	},
	{
		.procname	=	"gc_min_interval",
5122
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
L
Linus Torvalds 已提交
5123 5124
		.maxlen		=	sizeof(int),
		.mode		=	0644,
A
Alexey Dobriyan 已提交
5125
		.proc_handler	=	proc_dointvec_jiffies,
L
Linus Torvalds 已提交
5126 5127 5128
	},
	{
		.procname	=	"gc_timeout",
5129
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_timeout,
L
Linus Torvalds 已提交
5130 5131
		.maxlen		=	sizeof(int),
		.mode		=	0644,
A
Alexey Dobriyan 已提交
5132
		.proc_handler	=	proc_dointvec_jiffies,
L
Linus Torvalds 已提交
5133 5134 5135
	},
	{
		.procname	=	"gc_interval",
5136
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_interval,
L
Linus Torvalds 已提交
5137 5138
		.maxlen		=	sizeof(int),
		.mode		=	0644,
A
Alexey Dobriyan 已提交
5139
		.proc_handler	=	proc_dointvec_jiffies,
L
Linus Torvalds 已提交
5140 5141 5142
	},
	{
		.procname	=	"gc_elasticity",
5143
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
L
Linus Torvalds 已提交
5144 5145
		.maxlen		=	sizeof(int),
		.mode		=	0644,
5146
		.proc_handler	=	proc_dointvec,
L
Linus Torvalds 已提交
5147 5148 5149
	},
	{
		.procname	=	"mtu_expires",
5150
		.data		=	&init_net.ipv6.sysctl.ip6_rt_mtu_expires,
L
Linus Torvalds 已提交
5151 5152
		.maxlen		=	sizeof(int),
		.mode		=	0644,
A
Alexey Dobriyan 已提交
5153
		.proc_handler	=	proc_dointvec_jiffies,
L
Linus Torvalds 已提交
5154 5155 5156
	},
	{
		.procname	=	"min_adv_mss",
5157
		.data		=	&init_net.ipv6.sysctl.ip6_rt_min_advmss,
L
Linus Torvalds 已提交
5158 5159
		.maxlen		=	sizeof(int),
		.mode		=	0644,
5160
		.proc_handler	=	proc_dointvec,
L
Linus Torvalds 已提交
5161 5162 5163
	},
	{
		.procname	=	"gc_min_interval_ms",
5164
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
L
Linus Torvalds 已提交
5165 5166
		.maxlen		=	sizeof(int),
		.mode		=	0644,
A
Alexey Dobriyan 已提交
5167
		.proc_handler	=	proc_dointvec_ms_jiffies,
L
Linus Torvalds 已提交
5168
	},
5169
	{ }
L
Linus Torvalds 已提交
5170 5171
};

5172
/* Duplicate the sysctl template for a new netns and repoint every
 * entry's .data at that netns. Returns NULL on allocation failure;
 * the caller owns (and must free) the returned table.
 */
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(ipv6_route_table_template,
			sizeof(ipv6_route_table_template),
			GFP_KERNEL);

	if (table) {
		table[0].data = &net->ipv6.sysctl.flush_delay;
		table[0].extra1 = net;	/* flush handler needs the netns */
		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	return table;
}
L
Linus Torvalds 已提交
5200 5201
#endif

5202
/* Per-netns setup for the IPv6 routing subsystem: dst ops and entry
 * counters, the special null/prohibit/blackhole route templates, and
 * default sysctl values. Returns 0 or -ENOMEM; on failure all
 * partially-allocated state is unwound via the goto chain.
 */
static int __net_init ip6_route_net_init(struct net *net)
{
	int ret = -ENOMEM;

	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(net->ipv6.ip6_dst_ops));

	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
		goto out_ip6_dst_ops;

	net->ipv6.fib6_null_entry = kmemdup(&fib6_null_entry_template,
					    sizeof(*net->ipv6.fib6_null_entry),
					    GFP_KERNEL);
	if (!net->ipv6.fib6_null_entry)
		goto out_ip6_dst_entries;

	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
					   sizeof(*net->ipv6.ip6_null_entry),
					   GFP_KERNEL);
	if (!net->ipv6.ip6_null_entry)
		goto out_fib6_null_entry;
	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.fib6_has_custom_rules = false;
	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
					       sizeof(*net->ipv6.ip6_prohibit_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_prohibit_entry)
		goto out_ip6_null_entry;
	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
			 ip6_template_metrics, true);

	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
					       sizeof(*net->ipv6.ip6_blk_hole_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_blk_hole_entry)
		goto out_ip6_prohibit_entry;
	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
			 ip6_template_metrics, true);
#endif

	/* default tunables, overridable via net.ipv6.route.* sysctls */
	net->ipv6.sysctl.flush_delay = 0;
	net->ipv6.sysctl.ip6_rt_max_size = 4096;
	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;

	net->ipv6.ip6_rt_gc_expire = 30*HZ;

	ret = 0;
out:
	return ret;

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
	kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
	kfree(net->ipv6.ip6_null_entry);
#endif
out_fib6_null_entry:
	kfree(net->ipv6.fib6_null_entry);
out_ip6_dst_entries:
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
	goto out;
}

5277
/* Per-netns teardown: release the special route entries allocated by
 * ip6_route_net_init() and destroy the dst entry counter.
 */
static void __net_exit ip6_route_net_exit(struct net *net)
{
	kfree(net->ipv6.fib6_null_entry);
	kfree(net->ipv6.ip6_null_entry);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	kfree(net->ipv6.ip6_prohibit_entry);
	kfree(net->ipv6.ip6_blk_hole_entry);
#endif
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}

5288 5289 5290
/* Late per-netns init: create the /proc/net/ipv6_route and
 * /proc/net/rt6_stats entries (when CONFIG_PROC_FS is set).
 */
static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_create_net("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops,
			sizeof(struct ipv6_route_iter));
	proc_create_net_single("rt6_stats", 0444, net->proc_net,
			rt6_stats_seq_show, NULL);
#endif
	return 0;
}

/* Late per-netns teardown: remove the proc entries created by
 * ip6_route_net_init_late().
 */
static void __net_exit ip6_route_net_exit_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ipv6_route", net->proc_net);
	remove_proc_entry("rt6_stats", net->proc_net);
#endif
}

5307 5308 5309 5310 5311
/* Main per-netns hooks for the IPv6 routing core. */
static struct pernet_operations ip6_route_net_ops = {
	.init = ip6_route_net_init,
	.exit = ip6_route_net_exit,
};

5312 5313 5314 5315 5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327
/* Allocate and initialize the per-netns IPv6 inetpeer base. */
static int __net_init ipv6_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv6.peers = bp;
	return 0;
}

/* Tear down the per-netns IPv6 inetpeer base: detach it from the
 * netns, invalidate the peer tree, then free the base itself.
 */
static void __net_exit ipv6_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv6.peers;

	net->ipv6.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

5332
static struct pernet_operations ipv6_inetpeer_ops = {
5333 5334 5335 5336
	.init	=	ipv6_inetpeer_init,
	.exit	=	ipv6_inetpeer_exit,
};

5337 5338 5339 5340 5341
/* Late per-netns hooks (proc entries). */
static struct pernet_operations ip6_route_net_late_ops = {
	.init = ip6_route_net_init_late,
	.exit = ip6_route_net_exit_late,
};

5342 5343
static struct notifier_block ip6_route_dev_notifier = {
	.notifier_call = ip6_route_dev_notify,
5344
	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
5345 5346
};

5347 5348 5349 5350 5351
/* Bind init_net's special route entries to the loopback device and
 * take the idev references that were skipped at registration time.
 */
void __init ip6_route_init_special_entries(void)
{
	/* Registering of the loopback is done before this portion of code,
	 * the loopback reference in rt6_info will not be taken, do it
	 * manually for init_net */
	init_net.ipv6.fib6_null_entry->fib6_nh.nh_dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
  #ifdef CONFIG_IPV6_MULTIPLE_TABLES
	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
  #endif
}

5363
/* Module-level initialization of the IPv6 routing subsystem: slab
 * cache, dst entry counters, pernet subsystems, FIB, xfrm, policy
 * rules, rtnetlink handlers, netdevice notifier, and the per-cpu
 * uncached-route lists. On any failure, everything registered so far
 * is unwound in reverse order via the goto chain.
 */
int __init ip6_route_init(void)
{
	int ret;
	int cpu;

	ret = -ENOMEM;
	ip6_dst_ops_template.kmem_cachep =
		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!ip6_dst_ops_template.kmem_cachep)
		goto out;

	ret = dst_entries_init(&ip6_dst_blackhole_ops);
	if (ret)
		goto out_kmem_cache;

	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
	if (ret)
		goto out_dst_entries;

	ret = register_pernet_subsys(&ip6_route_net_ops);
	if (ret)
		goto out_register_inetpeer;

	/* blackhole dsts share the regular rt6_info slab cache */
	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

	ret = fib6_init();
	if (ret)
		goto out_register_subsys;

	ret = xfrm6_init();
	if (ret)
		goto out_fib6_init;

	ret = fib6_rules_init();
	if (ret)
		goto xfrm6_init;

	ret = register_pernet_subsys(&ip6_route_net_late_ops);
	if (ret)
		goto fib6_rules_init;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
				   inet6_rtm_newroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
				   inet6_rtm_delroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
				   inet6_rtm_getroute, NULL,
				   RTNL_FLAG_DOIT_UNLOCKED);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
	if (ret)
		goto out_register_late_subsys;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}

out:
	return ret;

out_register_late_subsys:
	rtnl_unregister_all(PF_INET6);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
	fib6_rules_cleanup();
xfrm6_init:
	xfrm6_fini();
out_fib6_init:
	fib6_gc_cleanup();
out_register_subsys:
	unregister_pernet_subsys(&ip6_route_net_ops);
out_register_inetpeer:
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
	dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
	goto out;
}

void ip6_route_cleanup(void)
{
5457
	unregister_netdevice_notifier(&ip6_route_dev_notifier);
5458
	unregister_pernet_subsys(&ip6_route_net_late_ops);
T
Thomas Graf 已提交
5459
	fib6_rules_cleanup();
L
Linus Torvalds 已提交
5460 5461
	xfrm6_fini();
	fib6_gc_cleanup();
5462
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
5463
	unregister_pernet_subsys(&ip6_route_net_ops);
5464
	dst_entries_destroy(&ip6_dst_blackhole_ops);
5465
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
L
Linus Torvalds 已提交
5466
}