route.c 133.5 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5
/*
 *	Linux INET6 implementation
 *	FIB front-end.
 *
 *	Authors:
6
 *	Pedro Roque		<roque@di.fc.ul.pt>
L
Linus Torvalds 已提交
7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*	Changes:
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		reachable.  otherwise, round-robin the list.
23 24
 *	Ville Nuorvala
 *		Fixed routing subtrees.
L
Linus Torvalds 已提交
25 26
 */

27 28
#define pr_fmt(fmt) "IPv6: " fmt

29
#include <linux/capability.h>
L
Linus Torvalds 已提交
30
#include <linux/errno.h>
31
#include <linux/export.h>
L
Linus Torvalds 已提交
32 33 34 35 36 37 38 39
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
40
#include <linux/mroute6.h>
L
Linus Torvalds 已提交
41 42 43 44
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
45
#include <linux/nsproxy.h>
46
#include <linux/slab.h>
47
#include <linux/jhash.h>
48
#include <net/net_namespace.h>
L
Linus Torvalds 已提交
49 50 51 52 53 54 55 56 57
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
58
#include <net/dst_metadata.h>
L
Linus Torvalds 已提交
59
#include <net/xfrm.h>
60
#include <net/netevent.h>
61
#include <net/netlink.h>
62
#include <net/nexthop.h>
63
#include <net/lwtunnel.h>
64
#include <net/ip_tunnels.h>
D
David Ahern 已提交
65
#include <net/l3mdev.h>
66
#include <net/ip.h>
67
#include <linux/uaccess.h>
L
Linus Torvalds 已提交
68 69 70 71 72

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

73 74 75 76 77 78 79
static int ip6_rt_type_to_error(u8 fib6_type);

#define CREATE_TRACE_POINTS
#include <trace/events/fib6.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
#undef CREATE_TRACE_POINTS

80
/* Outcome of a neighbour (NUD) reachability check used when scoring
 * candidate routes.  Negative values are failures of increasing
 * severity; RT6_NUD_SUCCEED means the next hop looks usable.
 */
enum rt6_nud_state {
	RT6_NUD_FAIL_HARD = -3,		/* route unusable, skip it */
	RT6_NUD_FAIL_PROBE = -2,	/* neighbour failed, needs probing */
	RT6_NUD_FAIL_DO_RR = -1,	/* usable as last resort, trigger round-robin */
	RT6_NUD_SUCCEED = 1
};

L
Linus Torvalds 已提交
87
static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
88
static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
89
static unsigned int	 ip6_mtu(const struct dst_entry *dst);
L
Linus Torvalds 已提交
90 91 92 93
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void		ip6_dst_destroy(struct dst_entry *);
static void		ip6_dst_ifdown(struct dst_entry *,
				       struct net_device *dev, int how);
94
static int		 ip6_dst_gc(struct dst_ops *ops);
L
Linus Torvalds 已提交
95 96

static int		ip6_pkt_discard(struct sk_buff *skb);
E
Eric W. Biederman 已提交
97
static int		ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
98
static int		ip6_pkt_prohibit(struct sk_buff *skb);
E
Eric W. Biederman 已提交
99
static int		ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
L
Linus Torvalds 已提交
100
static void		ip6_link_failure(struct sk_buff *skb);
101 102 103 104
static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
105 106
static int rt6_score_route(struct fib6_info *rt, int oif, int strict);
static size_t rt6_nlmsg_size(struct fib6_info *rt);
107
static int rt6_fill_node(struct net *net, struct sk_buff *skb,
108
			 struct fib6_info *rt, struct dst_entry *dst,
109
			 struct in6_addr *dest, struct in6_addr *src,
110 111
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags);
112
static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt,
113 114
					   struct in6_addr *daddr,
					   struct in6_addr *saddr);
L
Linus Torvalds 已提交
115

116
#ifdef CONFIG_IPV6_ROUTE_INFO
117
static struct fib6_info *rt6_add_route_info(struct net *net,
118
					   const struct in6_addr *prefix, int prefixlen,
119 120
					   const struct in6_addr *gwaddr,
					   struct net_device *dev,
121
					   unsigned int pref);
122
static struct fib6_info *rt6_get_route_info(struct net *net,
123
					   const struct in6_addr *prefix, int prefixlen,
124 125
					   const struct in6_addr *gwaddr,
					   struct net_device *dev);
126 127
#endif

128 129 130 131 132 133 134
/* Per-CPU list of rt6_info entries that live outside the FIB tree
 * (e.g. cached/percpu dsts); protected by the embedded spinlock.
 */
struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);

135
/* Add @rt to this CPU's uncached list so it can be found and fixed up
 * when its device goes away (see rt6_uncached_list_flush_dev()).
 */
void rt6_uncached_list_add(struct rt6_info *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

	/* remember which CPU's list we joined; deletion may run elsewhere */
	rt->rt6i_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt6i_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

146
/* Remove @rt from the uncached list it was added to, if any, and drop
 * the per-netns uncached-route counter.  Safe to call on entries that
 * were never added (list_empty() check).
 */
void rt6_uncached_list_del(struct rt6_info *rt)
{
	if (!list_empty(&rt->rt6i_uncached)) {
		struct uncached_list *ul = rt->rt6i_uncached_list;
		struct net *net = dev_net(rt->dst.dev);

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt6i_uncached);
		atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
		spin_unlock_bh(&ul->lock);
	}
}

/* Device-teardown helper: walk every CPU's uncached list and repoint
 * any entry still referencing @dev (either via rt6i_idev or dst.dev)
 * at the loopback device, moving the device/idev references over so
 * @dev can be unregistered.
 */
static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
{
	struct net_device *loopback_dev = net->loopback_dev;
	int cpu;

	/* nothing to migrate when loopback itself is going away */
	if (dev == loopback_dev)
		return;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
		struct rt6_info *rt;

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
			struct inet6_dev *rt_idev = rt->rt6i_idev;
			struct net_device *rt_dev = rt->dst.dev;

			if (rt_idev->dev == dev) {
				/* swap idev reference to loopback */
				rt->rt6i_idev = in6_dev_get(loopback_dev);
				in6_dev_put(rt_idev);
			}

			if (rt_dev == dev) {
				/* hold loopback before releasing old dev */
				rt->dst.dev = loopback_dev;
				dev_hold(rt->dst.dev);
				dev_put(rt_dev);
			}
		}
		spin_unlock_bh(&ul->lock);
	}
}

191
static inline const void *choose_neigh_daddr(const struct in6_addr *p,
192 193
					     struct sk_buff *skb,
					     const void *daddr)
194
{
D
David S. Miller 已提交
195
	if (!ipv6_addr_any(p))
196
		return (const void *) p;
197 198
	else if (skb)
		return &ipv6_hdr(skb)->daddr;
199 200 201
	return daddr;
}

202 203 204 205
/* Look up (or create) the neighbour entry for @gw/@daddr on @dev.
 * Returns NULL on creation failure rather than an ERR_PTR so callers
 * can use a simple NULL check.
 */
struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
				   struct net_device *dev,
				   struct sk_buff *skb,
				   const void *daddr)
{
	struct neighbour *n;

	daddr = choose_neigh_daddr(gw, skb, daddr);
	n = __ipv6_neigh_lookup(dev, daddr);
	if (n)
		return n;

	/* no cached entry: create one in the NDISC table */
	n = neigh_create(&nd_tbl, daddr, dev);
	return IS_ERR(n) ? NULL : n;
}

/* dst_ops->neigh_lookup hook: resolve the neighbour for a dst entry,
 * keyed by the route's gateway (or packet/daddr fallback).
 */
static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
					      struct sk_buff *skb,
					      const void *daddr)
{
	const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);

	return ip6_neigh_lookup(&rt->rt6i_gateway, dst->dev, skb, daddr);
}

227 228 229 230 231
/* dst_ops->confirm_neigh hook: mark the route's neighbour as recently
 * confirmed.  Skips devices that do not use neighbour resolution and
 * multicast destinations, which have no unicast neighbour entry.
 */
static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	struct net_device *dev = dst->dev;
	struct rt6_info *rt = (struct rt6_info *)dst;

	daddr = choose_neigh_daddr(&rt->rt6i_gateway, NULL, daddr);
	if (!daddr)
		return;
	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
		return;
	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
		return;
	__ipv6_confirm_neigh(dev, daddr);
}

242
/* dst_ops used for normal IPv6 routes; copied per-netns into
 * net->ipv6.ip6_dst_ops at namespace init.
 */
static struct dst_ops ip6_dst_ops_template = {
	.family			=	AF_INET6,
	.gc			=	ip6_dst_gc,
	.gc_thresh		=	1024,
	.check			=	ip6_dst_check,
	.default_advmss		=	ip6_default_advmss,
	.mtu			=	ip6_mtu,
	.cow_metrics		=	dst_cow_metrics_generic,
	.destroy		=	ip6_dst_destroy,
	.ifdown			=	ip6_dst_ifdown,
	.negative_advice	=	ip6_negative_advice,
	.link_failure		=	ip6_link_failure,
	.update_pmtu		=	ip6_rt_update_pmtu,
	.redirect		=	rt6_do_redirect,
	.local_out		=	__ip6_local_out,
	.neigh_lookup		=	ip6_dst_neigh_lookup,
	.confirm_neigh		=	ip6_confirm_neigh,
};

261
static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
262
{
263 264 265
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
266 267
}

268 269
/* Blackhole dsts intentionally ignore PMTU updates. */
static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					 struct sk_buff *skb, u32 mtu)
{
}

273 274
/* Blackhole dsts intentionally ignore redirects. */
static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				      struct sk_buff *skb)
{
}

278 279 280 281
/* dst_ops for blackhole routes (used e.g. by xfrm when a dst must be
 * neutralized): no gc, and PMTU/redirect events are no-ops.
 */
static struct dst_ops ip6_dst_blackhole_ops = {
	.family			=	AF_INET6,
	.destroy		=	ip6_dst_destroy,
	.check			=	ip6_dst_check,
	.mtu			=	ip6_blackhole_mtu,
	.default_advmss		=	ip6_default_advmss,
	.update_pmtu		=	ip6_rt_blackhole_update_pmtu,
	.redirect		=	ip6_rt_blackhole_redirect,
	.cow_metrics		=	dst_cow_metrics_generic,
	.neigh_lookup		=	ip6_dst_neigh_lookup,
};

290
/* Default metrics for template routes; hop limit 0 means "use the
 * interface/ndisc default".
 */
static const u32 ip6_template_metrics[RTAX_MAX] = {
	[RTAX_HOPLIMIT - 1] = 0,
};

294
/* Template for the per-netns fib6 null entry: a reject route returned
 * when a lookup matches nothing.
 */
static const struct fib6_info fib6_null_entry_template = {
	.fib6_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.fib6_protocol  = RTPROT_KERNEL,
	.fib6_metric	= ~(u32)0,	/* worst possible metric */
	.fib6_ref	= ATOMIC_INIT(1),
	.fib6_type	= RTN_UNREACHABLE,
	.fib6_metrics	= (struct dst_metrics *)&dst_default_metrics,
};

303
/* Template for the per-netns null route: drops packets and reports
 * ENETUNREACH.
 */
static const struct rt6_info ip6_null_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -ENETUNREACH,
		.input		= ip6_pkt_discard,
		.output		= ip6_pkt_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

T
Thomas Graf 已提交
315 316
#ifdef CONFIG_IPV6_MULTIPLE_TABLES

317
static const struct rt6_info ip6_prohibit_entry_template = {
318 319 320
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
321
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
322 323 324
		.error		= -EACCES,
		.input		= ip6_pkt_prohibit,
		.output		= ip6_pkt_prohibit_out,
T
Thomas Graf 已提交
325 326 327 328
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

329
static const struct rt6_info ip6_blk_hole_entry_template = {
330 331 332
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
333
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
334 335
		.error		= -EINVAL,
		.input		= dst_discard,
E
Eric W. Biederman 已提交
336
		.output		= dst_discard_out,
T
Thomas Graf 已提交
337 338 339 340 341 342
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

#endif

343 344 345 346 347 348 349 350
/* Zero the rt6_info-specific tail of a freshly allocated dst and init
 * the uncached-list hook.  The memset deliberately starts just past
 * the embedded dst_entry, which dst_alloc() already initialized.
 */
static void rt6_info_init(struct rt6_info *rt)
{
	struct dst_entry *dst = &rt->dst;

	memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
	INIT_LIST_HEAD(&rt->rt6i_uncached);
}

L
Linus Torvalds 已提交
351
/* Allocate an rt6_info dst via the per-netns ip6_dst_ops, initialize
 * the rt6-specific fields, and bump the allocation statistic.
 * Returns NULL on allocation failure.
 */
struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
			       int flags)
{
	/* initial refcount 1; obsolete forces ip6_dst_check on reuse */
	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
					1, DST_OBSOLETE_FORCE_CHK, flags);

	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
	}

	return rt;
}
EXPORT_SYMBOL(ip6_dst_alloc);
M
Martin KaFai Lau 已提交
366

L
Linus Torvalds 已提交
367 368
/* dst_ops->destroy: release everything an rt6_info holds — shared
 * metrics (if refcounted), uncached-list membership, the inet6_dev
 * reference, and the RCU-protected link back to its fib6_info origin.
 */
static void ip6_dst_destroy(struct dst_entry *dst)
{
	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct fib6_info *from;
	struct inet6_dev *idev;

	/* free shared metrics only on last reference */
	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
		kfree(p);

	rt6_uncached_list_del(rt);

	idev = rt->rt6i_idev;
	if (idev) {
		rt->rt6i_idev = NULL;
		in6_dev_put(idev);
	}

	/* detach from the origin fib6_info under RCU and drop its ref */
	rcu_read_lock();
	from = rcu_dereference(rt->from);
	rcu_assign_pointer(rt->from, NULL);
	fib6_info_release(from);
	rcu_read_unlock();
}

L
Linus Torvalds 已提交
392 393 394 395 396
/* dst_ops->ifdown: when the route's device goes down, migrate the
 * idev reference to the namespace's loopback device so the original
 * device can be released.
 */
static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			   int how)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct net_device *loopback_dev =
		dev_net(dev)->loopback_dev;

	if (idev && idev->dev != loopback_dev) {
		struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
		if (loopback_idev) {
			rt->rt6i_idev = loopback_idev;
			in6_dev_put(idev);
		}
	}
}

409 410 411 412 413 414 415 416
/* True if the route carries RTF_EXPIRES and its expiry time has
 * already passed; routes without RTF_EXPIRES never expire here.
 */
static bool __rt6_check_expired(const struct rt6_info *rt)
{
	return (rt->rt6i_flags & RTF_EXPIRES) &&
	       time_after(jiffies, rt->dst.expires);
}

417
/* Full expiry check: either the rt6_info itself has timed out, or —
 * for cached copies still linked to a fib6_info — the dst has been
 * obsoleted or the origin route has expired.
 * Caller context: rcu_dereference() implies RCU read side is held.
 */
static bool rt6_check_expired(const struct rt6_info *rt)
{
	struct fib6_info *from;

	from = rcu_dereference(rt->from);

	if (rt->rt6i_flags & RTF_EXPIRES) {
		if (time_after(jiffies, rt->dst.expires))
			return true;
	} else if (from) {
		return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
			fib6_check_expired(from);
	}
	return false;
}

433 434 435 436 437
/* Pick one nexthop among an ECMP group by flow hash: each sibling owns
 * a hash range ending at its nh_upper_bound; the first sibling whose
 * bound covers the hash (and that scores acceptably) is chosen.
 * Falls back to @match when no sibling qualifies.
 */
struct fib6_info *fib6_multipath_select(const struct net *net,
					struct fib6_info *match,
					struct flowi6 *fl6, int oif,
					const struct sk_buff *skb,
					int strict)
{
	struct fib6_info *sibling, *next_sibling;

	/* We might have already computed the hash for ICMPv6 errors. In such
	 * case it will always be non-zero. Otherwise now is the time to do it.
	 */
	if (!fl6->mp_hash)
		fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);

	/* hash falls in the first (match's) range: done */
	if (fl6->mp_hash <= atomic_read(&match->fib6_nh.nh_upper_bound))
		return match;

	list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
				 fib6_siblings) {
		int nh_upper_bound;

		nh_upper_bound = atomic_read(&sibling->fib6_nh.nh_upper_bound);
		if (fl6->mp_hash > nh_upper_bound)
			continue;
		if (rt6_score_route(sibling, oif, strict) < 0)
			break;
		match = sibling;
		break;
	}

	return match;
}

L
Linus Torvalds 已提交
466
/*
467
 *	Route lookup. rcu_read_lock() should be held.
L
Linus Torvalds 已提交
468 469
 */

470 471
/*
 *	Route lookup helper. rcu_read_lock() should be held.
 *
 * Walk the routes sharing @rt's node looking for one whose nexthop
 * device matches @oif (or, with no oif, one whose device owns @saddr).
 * Returns the per-netns null entry when a strict interface match is
 * required but none is found, or when the only candidate is dead.
 */
static inline struct fib6_info *rt6_device_match(struct net *net,
						 struct fib6_info *rt,
						 const struct in6_addr *saddr,
						 int oif,
						 int flags)
{
	struct fib6_info *sprt;

	/* no constraints to satisfy: any live route will do */
	if (!oif && ipv6_addr_any(saddr) &&
	    !(rt->fib6_nh.nh_flags & RTNH_F_DEAD))
		return rt;

	for (sprt = rt; sprt; sprt = rcu_dereference(sprt->fib6_next)) {
		const struct net_device *dev = sprt->fib6_nh.nh_dev;

		if (sprt->fib6_nh.nh_flags & RTNH_F_DEAD)
			continue;

		if (oif) {
			if (dev->ifindex == oif)
				return sprt;
		} else {
			if (ipv6_chk_addr(net, saddr, dev,
					  flags & RT6_LOOKUP_F_IFACE))
				return sprt;
		}
	}

	if (oif && flags & RT6_LOOKUP_F_IFACE)
		return net->ipv6.fib6_null_entry;

	return rt->fib6_nh.nh_flags & RTNH_F_DEAD ? net->ipv6.fib6_null_entry : rt;
}

504
#ifdef CONFIG_IPV6_ROUTER_PREF
/* Deferred-work context for a single router reachability probe. */
struct __rt6_probe_work {
	struct work_struct work;
	struct in6_addr target;		/* router address to solicit */
	struct net_device *dev;		/* held across the deferred send */
};

/* Workqueue handler: send a neighbour solicitation to the probe
 * target, then drop the device reference taken by rt6_probe().
 */
static void rt6_probe_deferred(struct work_struct *w)
{
	struct in6_addr mcaddr;
	struct __rt6_probe_work *work =
		container_of(w, struct __rt6_probe_work, work);

	addrconf_addr_solict_mult(&work->target, &mcaddr);
	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
	dev_put(work->dev);
	kfree(work);
}

523
/* Router Reachability Probing (RFC 4191-style): if the gateway's
 * neighbour entry is stale or missing and the probe interval elapsed,
 * schedule a deferred neighbour solicitation.  The actual send happens
 * in rt6_probe_deferred() off a workqueue.
 */
static void rt6_probe(struct fib6_info *rt)
{
	struct __rt6_probe_work *work = NULL;
	const struct in6_addr *nh_gw;
	struct neighbour *neigh;
	struct net_device *dev;
	struct inet6_dev *idev;

	/*
	 * Okay, this does not seem to be appropriate
	 * for now, however, we need to check if it
	 * is really so; aka Router Reachability Probing.
	 *
	 * Router Reachability Probe MUST be rate-limited
	 * to no more than one per minute.
	 */
	if (!rt || !(rt->fib6_flags & RTF_GATEWAY))
		return;

	nh_gw = &rt->fib6_nh.nh_gw;
	dev = rt->fib6_nh.nh_dev;
	rcu_read_lock_bh();
	idev = __in6_dev_get(dev);
	neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
	if (neigh) {
		if (neigh->nud_state & NUD_VALID)
			goto out;

		/* re-check state under the neighbour lock before arming */
		write_lock(&neigh->lock);
		if (!(neigh->nud_state & NUD_VALID) &&
		    time_after(jiffies,
			       neigh->updated + idev->cnf.rtr_probe_interval)) {
			work = kmalloc(sizeof(*work), GFP_ATOMIC);
			if (work)
				__neigh_set_probe_once(neigh);
		}
		write_unlock(&neigh->lock);
	} else if (time_after(jiffies, rt->last_probe +
				       idev->cnf.rtr_probe_interval)) {
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
	}

	if (work) {
		rt->last_probe = jiffies;
		INIT_WORK(&work->work, rt6_probe_deferred);
		work->target = *nh_gw;
		dev_hold(dev);	/* released in rt6_probe_deferred() */
		work->dev = dev;
		schedule_work(&work->work);
	}

out:
	rcu_read_unlock_bh();
}
#else
/* Probing disabled without CONFIG_IPV6_ROUTER_PREF. */
static inline void rt6_probe(struct fib6_info *rt)
{
}
#endif

L
Linus Torvalds 已提交
583
/*
584
 * Default Router Selection (RFC 2461 6.3.6)
L
Linus Torvalds 已提交
585
 */
586
static inline int rt6_check_dev(struct fib6_info *rt, int oif)
587
{
588 589
	const struct net_device *dev = rt->fib6_nh.nh_dev;

590
	if (!oif || dev->ifindex == oif)
591
		return 2;
592
	return 0;
593
}
L
Linus Torvalds 已提交
594

595
/* Neighbour-reachability component of route selection: gateway-less
 * routes always succeed; otherwise inspect the NUD state of the
 * gateway's neighbour entry.  With ROUTER_PREF, a failed neighbour
 * asks for probing; a missing one falls back to round-robin otherwise.
 */
static inline enum rt6_nud_state rt6_check_neigh(struct fib6_info *rt)
{
	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
	struct neighbour *neigh;

	if (rt->fib6_flags & RTF_NONEXTHOP ||
	    !(rt->fib6_flags & RTF_GATEWAY))
		return RT6_NUD_SUCCEED;

	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(rt->fib6_nh.nh_dev,
					  &rt->fib6_nh.nh_gw);
	if (neigh) {
		read_lock(&neigh->lock);
		if (neigh->nud_state & NUD_VALID)
			ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
		else if (!(neigh->nud_state & NUD_FAILED))
			ret = RT6_NUD_SUCCEED;
		else
			ret = RT6_NUD_FAIL_PROBE;
#endif
		read_unlock(&neigh->lock);
	} else {
		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
	}
	rcu_read_unlock_bh();

	return ret;
}

627
/* Score a candidate route for selection.  Combines the interface match
 * (bit 1), the router-preference value (bits 2+), and — in reachable
 * mode — the neighbour state, which may return a negative rt6_nud_state
 * failure instead of a score.
 */
static int rt6_score_route(struct fib6_info *rt, int oif, int strict)
{
	int m;

	m = rt6_check_dev(rt, oif);
	if (!m && (strict & RT6_LOOKUP_F_IFACE))
		return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
	/* fold in the RA router preference above the device bits */
	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->fib6_flags)) << 2;
#endif
	if (strict & RT6_LOOKUP_F_REACHABLE) {
		int n = rt6_check_neigh(rt);
		if (n < 0)
			return n;
	}
	return m;
}

D
David Ahern 已提交
645 646 647 648 649 650 651 652 653 654 655 656 657 658 659
/* called with rcu_read_lock held */
/* True when the nexthop device's inet6_dev is configured to ignore
 * routes whose link is down (ignore_routes_with_linkdown sysctl).
 */
static inline bool fib6_ignore_linkdown(const struct fib6_info *f6i)
{
	const struct net_device *dev = fib6_info_nh_dev(f6i);
	bool rc = false;

	if (dev) {
		const struct inet6_dev *idev = __in6_dev_get(dev);

		rc = !!idev->cnf.ignore_routes_with_linkdown;
	}

	return rc;
}

660 661
/* Compare candidate @rt against the best match so far: skip dead,
 * link-down (when configured) and expired routes, score the rest, and
 * keep the highest score in *mpri.  Sets *do_rr when a round-robin
 * rotation is wanted (score was RT6_NUD_FAIL_DO_RR).
 */
static struct fib6_info *find_match(struct fib6_info *rt, int oif, int strict,
				   int *mpri, struct fib6_info *match,
				   bool *do_rr)
{
	int m;
	bool match_do_rr = false;

	if (rt->fib6_nh.nh_flags & RTNH_F_DEAD)
		goto out;

	if (fib6_ignore_linkdown(rt) &&
	    rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN &&
	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
		goto out;

	if (fib6_check_expired(rt))
		goto out;

	m = rt6_score_route(rt, oif, strict);
	if (m == RT6_NUD_FAIL_DO_RR) {
		match_do_rr = true;
		m = 0; /* lowest valid score */
	} else if (m == RT6_NUD_FAIL_HARD) {
		goto out;
	}

	if (strict & RT6_LOOKUP_F_REACHABLE)
		rt6_probe(rt);

	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
	if (m > *mpri) {
		*do_rr = match_do_rr;
		*mpri = m;
		match = rt;
	}
out:
	return match;
}

699 700 701
/* Scan all routes of a node that share @metric, starting at the
 * round-robin head @rr_head and wrapping to @leaf, and return the best
 * per find_match().  If nothing matched at that metric, continue into
 * the higher-metric continuation (@cont) routes as a fallback.
 */
static struct fib6_info *find_rr_leaf(struct fib6_node *fn,
				     struct fib6_info *leaf,
				     struct fib6_info *rr_head,
				     u32 metric, int oif, int strict,
				     bool *do_rr)
{
	struct fib6_info *rt, *match, *cont;
	int mpri = -1;

	match = NULL;
	cont = NULL;
	/* first pass: rr_head to end of the metric group */
	for (rt = rr_head; rt; rt = rcu_dereference(rt->fib6_next)) {
		if (rt->fib6_metric != metric) {
			cont = rt;
			break;
		}

		match = find_match(rt, oif, strict, &mpri, match, do_rr);
	}

	/* second pass: wrap around from leaf up to rr_head */
	for (rt = leaf; rt && rt != rr_head;
	     rt = rcu_dereference(rt->fib6_next)) {
		if (rt->fib6_metric != metric) {
			cont = rt;
			break;
		}

		match = find_match(rt, oif, strict, &mpri, match, do_rr);
	}

	if (match || !cont)
		return match;

	/* nothing at the preferred metric: try the continuation routes */
	for (rt = cont; rt; rt = rcu_dereference(rt->fib6_next))
		match = find_match(rt, oif, strict, &mpri, match, do_rr);

	return match;
}
L
Linus Torvalds 已提交
737

738
/* Default router selection for a fib6 node: pick the best route among
 * those at the node's round-robin pointer, advance the pointer when
 * round-robin was requested, and fall back to the null entry when the
 * node yields nothing usable.  Runs under RCU; takes tb6_lock only to
 * update rr_ptr.
 */
static struct fib6_info *rt6_select(struct net *net, struct fib6_node *fn,
				   int oif, int strict)
{
	struct fib6_info *leaf = rcu_dereference(fn->leaf);
	struct fib6_info *match, *rt0;
	bool do_rr = false;
	int key_plen;

	if (!leaf || leaf == net->ipv6.fib6_null_entry)
		return net->ipv6.fib6_null_entry;

	rt0 = rcu_dereference(fn->rr_ptr);
	if (!rt0)
		rt0 = leaf;

	/* Double check to make sure fn is not an intermediate node
	 * and fn->leaf does not points to its child's leaf
	 * (This might happen if all routes under fn are deleted from
	 * the tree and fib6_repair_tree() is called on the node.)
	 */
	key_plen = rt0->fib6_dst.plen;
#ifdef CONFIG_IPV6_SUBTREES
	if (rt0->fib6_src.plen)
		key_plen = rt0->fib6_src.plen;
#endif
	if (fn->fn_bit != key_plen)
		return net->ipv6.fib6_null_entry;

	match = find_rr_leaf(fn, leaf, rt0, rt0->fib6_metric, oif, strict,
			     &do_rr);

	if (do_rr) {
		struct fib6_info *next = rcu_dereference(rt0->fib6_next);

		/* no entries matched; do round-robin */
		if (!next || next->fib6_metric != rt0->fib6_metric)
			next = leaf;

		if (next != rt0) {
			spin_lock_bh(&leaf->fib6_table->tb6_lock);
			/* make sure next is not being deleted from the tree */
			if (next->fib6_node)
				rcu_assign_pointer(fn->rr_ptr, next);
			spin_unlock_bh(&leaf->fib6_table->tb6_lock);
		}
	}

	return match ? match : net->ipv6.fib6_null_entry;
}

788
static bool rt6_is_gw_or_nonexthop(const struct fib6_info *rt)
789
{
790
	return (rt->fib6_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
791 792
}

793 794
#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
795
		  const struct in6_addr *gwaddr)
796
{
797
	struct net *net = dev_net(dev);
798 799 800
	struct route_info *rinfo = (struct route_info *) opt;
	struct in6_addr prefix_buf, *prefix;
	unsigned int pref;
801
	unsigned long lifetime;
802
	struct fib6_info *rt;
803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824

	if (len < sizeof(struct route_info)) {
		return -EINVAL;
	}

	/* Sanity check for prefix_len and length */
	if (rinfo->length > 3) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 128) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 64) {
		if (rinfo->length < 2) {
			return -EINVAL;
		}
	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1) {
			return -EINVAL;
		}
	}

	pref = rinfo->route_pref;
	if (pref == ICMPV6_ROUTER_PREF_INVALID)
825
		return -EINVAL;
826

827
	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
828 829 830 831 832 833 834 835 836 837 838

	if (rinfo->length == 3)
		prefix = (struct in6_addr *)rinfo->prefix;
	else {
		/* this function is safe */
		ipv6_addr_prefix(&prefix_buf,
				 (struct in6_addr *)rinfo->prefix,
				 rinfo->prefix_len);
		prefix = &prefix_buf;
	}

839
	if (rinfo->prefix_len == 0)
840
		rt = rt6_get_dflt_router(net, gwaddr, dev);
841 842
	else
		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
843
					gwaddr, dev);
844 845

	if (rt && !lifetime) {
846
		ip6_del_rt(net, rt);
847 848 849 850
		rt = NULL;
	}

	if (!rt && lifetime)
851 852
		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
					dev, pref);
853
	else if (rt)
854 855
		rt->fib6_flags = RTF_ROUTEINFO |
				 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
856 857

	if (rt) {
858
		if (!addrconf_finite_timeout(lifetime))
859
			fib6_clean_expires(rt);
860
		else
861
			fib6_set_expires(rt, jiffies + HZ * lifetime);
862

863
		fib6_info_release(rt);
864 865 866 867 868
	}
	return 0;
}
#endif

869 870 871 872 873
/*
 *	Misc support functions
 */

/* called with rcu_lock held */
/* Choose the dst device for a copy of a local/anycast route: the
 * l3mdev master when the nexthop device is enslaved (and the route is
 * not strict), the device itself when it is a master, and the loopback
 * device otherwise.  Non-local routes keep their nexthop device.
 */
static struct net_device *ip6_rt_get_dev_rcu(struct fib6_info *rt)
{
	struct net_device *dev = rt->fib6_nh.nh_dev;

	if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
		/* for copies of local routes, dst->dev needs to be the
		 * device if it is a master device, the master device if
		 * device is enslaved, and the loopback as the default
		 */
		if (netif_is_l3_slave(dev) &&
		    !rt6_need_strict(&rt->fib6_dst.addr))
			dev = l3mdev_master_dev_rcu(dev);
		else if (!netif_is_l3_master(dev))
			dev = dev_net(dev)->loopback_dev;
		/* last case is netif_is_l3_master(dev) is true in which
		 * case we want dev returned to be dev
		 */
	}

	return dev;
}

896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915
/* Map RTN_* route types to the dst error code reported to senders
 * (0 for deliverable types).
 */
static const int fib6_prop[RTN_MAX + 1] = {
	[RTN_UNSPEC]	= 0,
	[RTN_UNICAST]	= 0,
	[RTN_LOCAL]	= 0,
	[RTN_BROADCAST]	= 0,
	[RTN_ANYCAST]	= 0,
	[RTN_MULTICAST]	= 0,
	[RTN_BLACKHOLE]	= -EINVAL,
	[RTN_UNREACHABLE] = -EHOSTUNREACH,
	[RTN_PROHIBIT]	= -EACCES,
	[RTN_THROW]	= -EAGAIN,
	[RTN_NAT]	= -EINVAL,
	[RTN_XRESOLVE]	= -EINVAL,
};

/* Look up the dst error for a fib6 route type. */
static int ip6_rt_type_to_error(u8 fib6_type)
{
	return fib6_prop[fib6_type];
}

916
/* Translate fib6_info allocation hints into DST_* flags for the
 * rt6_info dst that will be created from it.
 */
static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
{
	unsigned short flags = 0;

	if (rt->dst_nocount)
		flags |= DST_NOCOUNT;
	if (rt->dst_nopolicy)
		flags |= DST_NOPOLICY;
	if (rt->dst_host)
		flags |= DST_HOST;

	return flags;
}

930
/* Configure the input/output handlers and dst error for a reject
 * route, according to its RTN_* type (blackhole, prohibit, throw,
 * unreachable).
 */
static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct fib6_info *ort)
{
	rt->dst.error = ip6_rt_type_to_error(ort->fib6_type);

	switch (ort->fib6_type) {
	case RTN_BLACKHOLE:
		rt->dst.output = dst_discard_out;
		rt->dst.input = dst_discard;
		break;
	case RTN_PROHIBIT:
		rt->dst.output = ip6_pkt_prohibit_out;
		rt->dst.input = ip6_pkt_prohibit;
		break;
	case RTN_THROW:
	case RTN_UNREACHABLE:
	default:
		rt->dst.output = ip6_pkt_discard_out;
		rt->dst.input = ip6_pkt_discard;
		break;
	}
}

952
/* Initialize a dst from its origin fib6_info: reject routes get error
 * handlers; others get the appropriate input path (local delivery,
 * multicast, or forwarding) plus any lightweight-tunnel state.
 */
static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
{
	if (ort->fib6_flags & RTF_REJECT) {
		ip6_rt_init_dst_reject(rt, ort);
		return;
	}

	rt->dst.error = 0;
	rt->dst.output = ip6_output;

	if (ort->fib6_type == RTN_LOCAL || ort->fib6_type == RTN_ANYCAST) {
		rt->dst.input = ip6_input;
	} else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
		rt->dst.input = ip6_mc_input;
	} else {
		rt->dst.input = ip6_forward;
	}

	if (ort->fib6_nh.nh_lwtstate) {
		rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate);
		lwtunnel_set_redirect(&rt->dst);
	}

	rt->dst.lastuse = jiffies;
}

978
/* Caller must already hold reference to @from */
/* Link an rt6_info back to its origin fib6_info and share the origin's
 * metrics, taking a metrics refcount when they are not the defaults.
 */
static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
{
	rt->rt6i_flags &= ~RTF_EXPIRES;
	rcu_assign_pointer(rt->from, from);
	dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
	if (from->fib6_metrics != &dst_default_metrics) {
		rt->dst._metrics |= DST_METRICS_REFCOUNTED;
		refcount_inc(&from->fib6_metrics->refcnt);
	}
}

990
/* Caller must already hold reference to @ort */
/* Populate a freshly allocated rt6_info from its origin fib6_info:
 * dst handlers, addresses, flags, idev reference and the from-link.
 */
static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
{
	struct net_device *dev = fib6_info_nh_dev(ort);

	ip6_rt_init_dst(rt, ort);

	rt->rt6i_dst = ort->fib6_dst;
	rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
	rt->rt6i_gateway = ort->fib6_nh.nh_gw;
	rt->rt6i_flags = ort->fib6_flags;
	rt6_set_from(rt, ort);
#ifdef CONFIG_IPV6_SUBTREES
	rt->rt6i_src = ort->fib6_src;
#endif
	rt->rt6i_prefsrc = ort->fib6_prefsrc;
}

M
Martin KaFai Lau 已提交
1008 1009 1010
/* Walk back up the fib6 tree from @fn looking for the next node that
 * actually carries route info, descending into source-address subtrees
 * along the way.  Returns NULL at the tree root.  Runs under RCU.
 */
static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
					struct in6_addr *saddr)
{
	struct fib6_node *pn, *sn;
	while (1) {
		if (fn->fn_flags & RTN_TL_ROOT)
			return NULL;
		pn = rcu_dereference(fn->parent);
		sn = FIB6_SUBTREE(pn);
		if (sn && sn != fn)
			fn = fib6_node_lookup(sn, NULL, saddr);
		else
			fn = pn;
		if (fn->fn_flags & RTN_RTINFO)
			return fn;
	}
}
T
Thomas Graf 已提交
1025

1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042
/* Try to take a reference on *prt.  On failure (dst being freed),
 * either substitute the held null entry (@null_fallback) or clear
 * *prt.  Returns true when the original dst was successfully held.
 */
static bool ip6_hold_safe(struct net *net, struct rt6_info **prt,
			  bool null_fallback)
{
	struct rt6_info *rt = *prt;

	if (dst_hold_safe(&rt->dst))
		return true;
	if (null_fallback) {
		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
	} else {
		rt = NULL;
	}
	*prt = rt;
	return false;
}

1043
/* called with rcu_lock held */
/* Materialize an rt6_info dst from a fib6_info.  Takes a reference on
 * @rt for the new dst (released again if allocation fails).  Returns
 * NULL when the fib6_info is already being freed or allocation fails.
 */
static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
{
	unsigned short flags = fib6_info_dst_flags(rt);
	struct net_device *dev = rt->fib6_nh.nh_dev;
	struct rt6_info *nrt;

	if (!fib6_info_hold_safe(rt))
		return NULL;

	nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
	if (nrt)
		ip6_rt_copy_init(nrt, rt);	/* consumes the hold via rt6_set_from */
	else
		fib6_info_release(rt);

	return nrt;
}

1062 1063
/* Policy-routing lookup callback for fib6_rule_lookup(): resolve @fl6 in
 * @table and return a referenced rt6_info (the null entry on failure).
 */
static struct rt6_info *ip6_pol_route_lookup(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	struct fib6_info *f6i;
	struct fib6_node *fn;
	struct rt6_info *rt;

	/* VRF device lookups disable strict oif matching */
	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		flags &= ~RT6_LOOKUP_F_IFACE;

	rcu_read_lock();
	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	f6i = rcu_dereference(fn->leaf);
	if (!f6i) {
		f6i = net->ipv6.fib6_null_entry;
	} else {
		f6i = rt6_device_match(net, f6i, &fl6->saddr,
				      fl6->flowi6_oif, flags);
		/* multipath selection only when no oif was specified */
		if (f6i->fib6_nsiblings && fl6->flowi6_oif == 0)
			f6i = fib6_multipath_select(net, f6i, fl6,
						    fl6->flowi6_oif, skb,
						    flags);
	}
	if (f6i == net->ipv6.fib6_null_entry) {
		/* no match at this node: climb the tree and retry */
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}

	trace_fib6_table_lookup(net, f6i, table, fl6);

	/* Search through exception table */
	rt = rt6_find_cached_rt(f6i, &fl6->daddr, &fl6->saddr);
	if (rt) {
		if (ip6_hold_safe(net, &rt, true))
			dst_use_noref(&rt->dst, jiffies);
	} else if (f6i == net->ipv6.fib6_null_entry) {
		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
	} else {
		/* materialize a dst for the fib entry under RCU */
		rt = ip6_create_rt_rcu(f6i);
		if (!rt) {
			rt = net->ipv6.ip6_null_entry;
			dst_hold(&rt->dst);
		}
	}

	rcu_read_unlock();

	return rt;
}

1118
/* Resolve @fl6 through the policy-routing rules using the plain
 * (non-caching-path) lookup.  Returns a referenced dst_entry.
 */
struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
				   const struct sk_buff *skb, int flags)
{
	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);

1125
/* Convenience lookup by destination/source/oif.  Builds a flowi6 and
 * performs a rule-based lookup.  Returns a referenced rt6_info on
 * success, or NULL when the resulting dst carries an error.
 */
struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
			    const struct in6_addr *saddr, int oif,
			    const struct sk_buff *skb, int strict)
{
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.daddr = *daddr,
	};
	struct dst_entry *dst;
	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

	if (saddr) {
		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	}

	dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
	if (dst->error == 0)
		return (struct rt6_info *) dst;

	dst_release(dst);

	return NULL;
}
EXPORT_SYMBOL(rt6_lookup);

T
Thomas Graf 已提交
1151
/* ip6_ins_rt is called with FREE table->tb6_lock.
 * It takes new route entry, the addition fails by any reason the
 * route is released.
 * Caller must hold dst before calling it.
 */

static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
			struct netlink_ext_ack *extack)
{
	int err;
	struct fib6_table *table;

	/* insert @rt into its table's tree under the table lock */
	table = rt->fib6_table;
	spin_lock_bh(&table->tb6_lock);
	err = fib6_add(&table->tb6_root, rt, info, extack);
	spin_unlock_bh(&table->tb6_lock);

	return err;
}

1171
/* Insert @rt with a default netlink info (no extack, no portid). */
int ip6_ins_rt(struct net *net, struct fib6_info *rt)
{
	struct nl_info info = {	.nl_net = net, };

	return __ip6_ins_rt(rt, &info, NULL);
}

1178
/* Allocate a RTF_CACHE clone of @ort keyed to @daddr (and @saddr under
 * CONFIG_IPV6_SUBTREES).  Returns a referenced rt6_info or NULL on
 * allocation failure / when @ort is going away.
 */
static struct rt6_info *ip6_rt_cache_alloc(struct fib6_info *ort,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	struct net_device *dev;
	struct rt6_info *rt;

	/*
	 *	Clone the route.
	 */

	if (!fib6_info_hold_safe(ort))
		return NULL;

	dev = ip6_rt_get_dev_rcu(ort);
	rt = ip6_dst_alloc(dev_net(dev), dev, 0);
	if (!rt) {
		/* drop the reference taken above */
		fib6_info_release(ort);
		return NULL;
	}

	ip6_rt_copy_init(rt, ort);
	rt->rt6i_flags |= RTF_CACHE;
	rt->dst.flags |= DST_HOST;
	/* the clone is host-specific: /128 destination */
	rt->rt6i_dst.addr = *daddr;
	rt->rt6i_dst.plen = 128;

	if (!rt6_is_gw_or_nonexthop(ort)) {
		if (ort->fib6_dst.plen != 128 &&
		    ipv6_addr_equal(&ort->fib6_dst.addr, daddr))
			rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
		if (rt->rt6i_src.plen && saddr) {
			rt->rt6i_src.addr = *saddr;
			rt->rt6i_src.plen = 128;
		}
#endif
	}

	return rt;
}
L
Linus Torvalds 已提交
1219

1220
/* Allocate a per-cpu RTF_PCPU copy of @rt.  Returns NULL on allocation
 * failure or when @rt is being freed.
 */
static struct rt6_info *ip6_rt_pcpu_alloc(struct fib6_info *rt)
{
	unsigned short flags = fib6_info_dst_flags(rt);
	struct net_device *dev;
	struct rt6_info *pcpu_rt;

	/* reference for the pcpu copy's backpointer */
	if (!fib6_info_hold_safe(rt))
		return NULL;

	rcu_read_lock();
	dev = ip6_rt_get_dev_rcu(rt);
	pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
	rcu_read_unlock();
	if (!pcpu_rt) {
		fib6_info_release(rt);
		return NULL;
	}
	ip6_rt_copy_init(pcpu_rt, rt);
	pcpu_rt->rt6i_flags |= RTF_PCPU;
	return pcpu_rt;
}

1242
/* It should be called with rcu_read_lock() acquired */
static struct rt6_info *rt6_get_pcpu_route(struct fib6_info *rt)
{
	struct rt6_info *pcpu_rt, **p;

	/* fetch this CPU's cached copy, if one was ever made */
	p = this_cpu_ptr(rt->rt6i_pcpu);
	pcpu_rt = *p;

	/* take a ref; on failure *pcpu_rt is set to NULL (no fallback) */
	if (pcpu_rt)
		ip6_hold_safe(NULL, &pcpu_rt, false);

	return pcpu_rt;
}

1256
/* Create and install this CPU's pcpu copy of @rt.  Returns a referenced
 * rt6_info (the null entry when allocation fails).
 */
static struct rt6_info *rt6_make_pcpu_route(struct net *net,
					    struct fib6_info *rt)
{
	struct rt6_info *pcpu_rt, *prev, **p;

	pcpu_rt = ip6_rt_pcpu_alloc(rt);
	if (!pcpu_rt) {
		dst_hold(&net->ipv6.ip6_null_entry->dst);
		return net->ipv6.ip6_null_entry;
	}

	/* one ref for the pcpu slot, returned ref comes from the alloc */
	dst_hold(&pcpu_rt->dst);
	p = this_cpu_ptr(rt->rt6i_pcpu);
	prev = cmpxchg(p, NULL, pcpu_rt);
	/* callers only get here when the slot was empty for this CPU */
	BUG_ON(prev);

	return pcpu_rt;
}

1275 1276 1277 1278 1279 1280 1281 1282 1283 1284
/* exception hash table implementation
 */
static DEFINE_SPINLOCK(rt6_exception_lock);

/* Remove rt6_ex from hash table and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
				 struct rt6_exception *rt6_ex)
{
	struct fib6_info *from;
	struct net *net;

	if (!bucket || !rt6_ex)
		return;

	net = dev_net(rt6_ex->rt6i->dst.dev);
	net->ipv6.rt6_stats->fib_rt_cache--;

	/* purge completely the exception to allow releasing the held resources:
	 * some [sk] cache may keep the dst around for unlimited time
	 */
	from = rcu_dereference_protected(rt6_ex->rt6i->from,
					 lockdep_is_held(&rt6_exception_lock));
	rcu_assign_pointer(rt6_ex->rt6i->from, NULL);
	fib6_info_release(from);
	dst_dev_put(&rt6_ex->rt6i->dst);

	hlist_del_rcu(&rt6_ex->hlist);
	/* drop the reference the exception table held on the dst */
	dst_release(&rt6_ex->rt6i->dst);
	kfree_rcu(rt6_ex, rcu);
	WARN_ON_ONCE(!bucket->depth);
	bucket->depth--;
}

/* Evict the least-recently stamped exception from @bucket.
 * Caller must hold rt6_exception_lock.
 */
static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
{
	struct rt6_exception *oldest = NULL;
	struct rt6_exception *iter;

	if (!bucket)
		return;

	hlist_for_each_entry(iter, &bucket->chain, hlist) {
		if (!oldest || time_before(iter->stamp, oldest->stamp))
			oldest = iter;
	}

	rt6_remove_exception(bucket, oldest);
}

/* Hash (dst[, src]) into a bucket index for the exception table.  The
 * random seed is initialized once per boot to perturb the hash.
 */
static u32 rt6_exception_hash(const struct in6_addr *dst,
			      const struct in6_addr *src)
{
	static u32 seed __read_mostly;
	u32 val;

	net_get_random_once(&seed, sizeof(seed));
	val = jhash(dst, sizeof(*dst), seed);

#ifdef CONFIG_IPV6_SUBTREES
	/* source address only participates for subtree routes */
	if (src)
		val = jhash(src, sizeof(*src), val);
#endif
	return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
}

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rt6_exception_lock
 */
static struct rt6_exception *
__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
			      const struct in6_addr *daddr,
			      const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	if (!(*bucket) || !daddr)
		return NULL;

	/* advance *bucket to the hashed slot (side effect for caller) */
	hval = rt6_exception_hash(daddr, saddr);
	*bucket += hval;

	hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rcu_read_lock()
 */
static struct rt6_exception *
__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
			 const struct in6_addr *daddr,
			 const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!(*bucket) || !daddr)
		return NULL;

	/* advance *bucket to the hashed slot (side effect for caller) */
	hval = rt6_exception_hash(daddr, saddr);
	*bucket += hval;

	hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}

1411
static unsigned int fib6_mtu(const struct fib6_info *rt)
1412 1413 1414
{
	unsigned int mtu;

D
David Ahern 已提交
1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426
	if (rt->fib6_pmtu) {
		mtu = rt->fib6_pmtu;
	} else {
		struct net_device *dev = fib6_info_nh_dev(rt);
		struct inet6_dev *idev;

		rcu_read_lock();
		idev = __in6_dev_get(dev);
		mtu = idev->cnf.mtu6;
		rcu_read_unlock();
	}

1427 1428 1429 1430 1431
	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(rt->fib6_nh.nh_lwtstate, mtu);
}

1432
/* Insert RTF_CACHE clone @nrt into @ort's exception table.
 * Returns 0 on success, -EINVAL when the table was flushed or the
 * clone's MTU is not below @ort's, -ENOMEM on allocation failure.
 */
static int rt6_insert_exception(struct rt6_info *nrt,
				struct fib6_info *ort)
{
	struct net *net = dev_net(nrt->dst.dev);
	struct rt6_exception_bucket *bucket;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	int err = 0;

	spin_lock_bh(&rt6_exception_lock);

	/* rt6_flush_exceptions() forbids re-creating the bucket list */
	if (ort->exception_bucket_flushed) {
		err = -EINVAL;
		goto out;
	}

	bucket = rcu_dereference_protected(ort->rt6i_exception_bucket,
					lockdep_is_held(&rt6_exception_lock));
	if (!bucket) {
		bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
				 GFP_ATOMIC);
		if (!bucket) {
			err = -ENOMEM;
			goto out;
		}
		rcu_assign_pointer(ort->rt6i_exception_bucket, bucket);
	}

#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates ort is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (ort->fib6_src.plen)
		src_key = &nrt->rt6i_src.addr;
#endif

	/* Update rt6i_prefsrc as it could be changed
	 * in rt6_remove_prefsrc()
	 */
	nrt->rt6i_prefsrc = ort->fib6_prefsrc;
	/* rt6_mtu_change() might lower mtu on ort.
	 * Only insert this exception route if its mtu
	 * is less than ort's mtu value.
	 */
	if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(ort)) {
		err = -EINVAL;
		goto out;
	}

	/* replace any existing exception for the same (daddr, saddr) */
	rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex)
		rt6_remove_exception(bucket, rt6_ex);

	rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
	if (!rt6_ex) {
		err = -ENOMEM;
		goto out;
	}
	rt6_ex->rt6i = nrt;
	rt6_ex->stamp = jiffies;
	hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
	bucket->depth++;
	net->ipv6.rt6_stats->fib_rt_cache++;

	if (bucket->depth > FIB6_MAX_DEPTH)
		rt6_exception_remove_oldest(bucket);

out:
	spin_unlock_bh(&rt6_exception_lock);

	/* Update fn->fn_sernum to invalidate all cached dst */
	if (!err) {
		spin_lock_bh(&ort->fib6_table->tb6_lock);
		fib6_update_sernum(net, ort);
		spin_unlock_bh(&ort->fib6_table->tb6_lock);
		fib6_force_start_gc(net);
	}

	return err;
}

1517
/* Remove every exception cached under @rt and permanently disable its
 * exception table (rt6_insert_exception() will then fail with -EINVAL).
 */
void rt6_flush_exceptions(struct fib6_info *rt)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	spin_lock_bh(&rt6_exception_lock);
	/* Prevent rt6_insert_exception() to recreate the bucket list */
	rt->exception_bucket_flushed = 1;

	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
				    lockdep_is_held(&rt6_exception_lock));
	if (!bucket)
		goto out;

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist)
			rt6_remove_exception(bucket, rt6_ex);
		WARN_ON_ONCE(bucket->depth);
		bucket++;
	}

out:
	spin_unlock_bh(&rt6_exception_lock);
}

/* Find cached rt in the hash table inside passed in rt
 * Caller has to hold rcu_read_lock()
 */
static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt,
					   struct in6_addr *daddr,
					   struct in6_addr *saddr)
{
	struct rt6_exception_bucket *bucket;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	struct rt6_info *res = NULL;

	bucket = rcu_dereference(rt->rt6i_exception_bucket);

#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates rt is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (rt->fib6_src.plen)
		src_key = saddr;
#endif
	rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);

	/* expired entries are skipped, not removed, here */
	if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
		res = rt6_ex->rt6i;

	return res;
}

/* Remove the passed in cached rt from the hash table that contains it */
static int rt6_remove_exception_rt(struct rt6_info *rt)
{
	struct rt6_exception_bucket *bucket;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	struct fib6_info *from;
	int err;

	/* NOTE(review): rcu_dereference() here presumes the caller runs
	 * under rcu_read_lock() — confirm against call sites.
	 */
	from = rcu_dereference(rt->from);
	if (!from ||
	    !(rt->rt6i_flags & RTF_CACHE))
		return -EINVAL;

	if (!rcu_access_pointer(from->rt6i_exception_bucket))
		return -ENOENT;

	spin_lock_bh(&rt6_exception_lock);
	bucket = rcu_dereference_protected(from->rt6i_exception_bucket,
				    lockdep_is_held(&rt6_exception_lock));
#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (from->fib6_src.plen)
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_spinlock(&bucket,
					       &rt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex) {
		rt6_remove_exception(bucket, rt6_ex);
		err = 0;
	} else {
		err = -ENOENT;
	}

	spin_unlock_bh(&rt6_exception_lock);
	return err;
}

/* Find rt6_ex which contains the passed in rt cache and
 * refresh its stamp
 */
static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
{
	struct rt6_exception_bucket *bucket;
	/* NOTE(review): rt->from is read without rcu_dereference() and
	 * before rcu_read_lock() below — verify the callers guarantee
	 * 'from' stays valid across this function.
	 */
	struct fib6_info *from = rt->from;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;

	if (!from ||
	    !(rt->rt6i_flags & RTF_CACHE))
		return;

	rcu_read_lock();
	bucket = rcu_dereference(from->rt6i_exception_bucket);

#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (from->fib6_src.plen)
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_rcu(&bucket,
					  &rt->rt6i_dst.addr,
					  src_key);
	if (rt6_ex)
		rt6_ex->stamp = jiffies;

	rcu_read_unlock();
}

1656
/* Clear the preferred-source prefix length on every exception cached
 * under @rt.  Caller must hold rt6_exception_lock (the bucket is
 * dereferenced with lockdep_is_held on that lock).
 */
static void rt6_exceptions_remove_prefsrc(struct fib6_info *rt)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	int i;

	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
					lockdep_is_held(&rt6_exception_lock));

	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
				rt6_ex->rt6i->rt6i_prefsrc.plen = 0;
			}
			bucket++;
		}
	}
}

1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697
static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
					 struct rt6_info *rt, int mtu)
{
	/* If the new MTU is lower than the route PMTU, this new MTU will be the
	 * lowest MTU in the path: always allow updating the route PMTU to
	 * reflect PMTU decreases.
	 *
	 * If the new MTU is higher, and the route PMTU is equal to the local
	 * MTU, this means the old MTU is the lowest in the path, so allow
	 * updating it: if other nodes now have lower MTUs, PMTU discovery will
	 * handle this.
	 */
	unsigned int route_mtu = dst_mtu(&rt->dst);

	return route_mtu >= mtu || route_mtu == idev->cnf.mtu6;
}

/* Propagate an MTU change on @idev to the exceptions cached under @rt.
 * Caller must hold rt6_exception_lock.
 */
static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
				       struct fib6_info *rt, int mtu)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	int i;

	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
					lockdep_is_held(&rt6_exception_lock));

	if (!bucket)
		return;

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
			struct rt6_info *entry = rt6_ex->rt6i;

			/* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
			 * route), the metrics of its rt->from have already
			 * been updated.
			 */
			if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
			    rt6_mtu_change_route_allowed(idev, entry, mtu))
				dst_metric_set(&entry->dst, RTAX_MTU, mtu);
		}
		bucket++;
	}
}

1726 1727
#define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)

/* Drop every cached gateway exception under @rt whose gateway equals
 * @gateway (e.g. when that router is no longer usable).
 */
static void rt6_exceptions_clean_tohost(struct fib6_info *rt,
					struct in6_addr *gateway)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	if (!rcu_access_pointer(rt->rt6i_exception_bucket))
		return;

	spin_lock_bh(&rt6_exception_lock);
	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
				     lockdep_is_held(&rt6_exception_lock));

	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				struct rt6_info *entry = rt6_ex->rt6i;

				if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
				    RTF_CACHE_GATEWAY &&
				    ipv6_addr_equal(gateway,
						    &entry->rt6i_gateway)) {
					rt6_remove_exception(bucket, rt6_ex);
				}
			}
			bucket++;
		}
	}

	spin_unlock_bh(&rt6_exception_lock);
}

1763 1764 1765 1766 1767 1768 1769
/* GC helper: examine one exception and remove it when aged out, expired,
 * or routed via a gateway that is no longer advertising itself as a
 * router.  Surviving entries bump gc_args->more so GC reschedules.
 * Caller must hold rt6_exception_lock (see rt6_age_exceptions()).
 */
static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
				      struct rt6_exception *rt6_ex,
				      struct fib6_gc_args *gc_args,
				      unsigned long now)
{
	struct rt6_info *rt = rt6_ex->rt6i;

	/* we are pruning and obsoleting aged-out and non gateway exceptions
	 * even if others have still references to them, so that on next
	 * dst_check() such references can be dropped.
	 * EXPIRES exceptions - e.g. pmtu-generated ones are pruned when
	 * expired, independently from their aging, as per RFC 8201 section 4
	 */
	if (!(rt->rt6i_flags & RTF_EXPIRES)) {
		if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
			RT6_TRACE("aging clone %p\n", rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	} else if (time_after(jiffies, rt->dst.expires)) {
		RT6_TRACE("purging expired route %p\n", rt);
		rt6_remove_exception(bucket, rt6_ex);
		return;
	}

	if (rt->rt6i_flags & RTF_GATEWAY) {
		struct neighbour *neigh;
		__u8 neigh_flags = 0;

		neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
		if (neigh)
			neigh_flags = neigh->flags;

		if (!(neigh_flags & NTF_ROUTER)) {
			RT6_TRACE("purging route %p via non-router but gateway\n",
				  rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	}

	gc_args->more++;
}

1807
/* Garbage-collect @rt's exception table: walk every bucket and let
 * rt6_age_examine_exception() decide each entry's fate.
 * rcu_read_lock_bh() covers the neighbour lookup done while examining.
 */
void rt6_age_exceptions(struct fib6_info *rt,
			struct fib6_gc_args *gc_args,
			unsigned long now)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	if (!rcu_access_pointer(rt->rt6i_exception_bucket))
		return;

	rcu_read_lock_bh();
	spin_lock(&rt6_exception_lock);
	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
				    lockdep_is_held(&rt6_exception_lock));

	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				rt6_age_examine_exception(bucket, rt6_ex,
							  gc_args, now);
			}
			bucket++;
		}
	}
	spin_unlock(&rt6_exception_lock);
	rcu_read_unlock_bh();
}

1838 1839 1840
/* must be called with rcu lock held */
struct fib6_info *fib6_table_lookup(struct net *net, struct fib6_table *table,
				    int oif, struct flowi6 *fl6, int strict)
{
	struct fib6_node *fn, *saved_fn;
	struct fib6_info *f6i;

	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
	saved_fn = fn;

	/* VRF device lookups do not constrain on the outgoing interface */
	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		oif = 0;

redo_rt6_select:
	f6i = rt6_select(net, fn, oif, strict);
	if (f6i == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto redo_rt6_select;
		else if (strict & RT6_LOOKUP_F_REACHABLE) {
			/* also consider unreachable route */
			strict &= ~RT6_LOOKUP_F_REACHABLE;
			fn = saved_fn;
			goto redo_rt6_select;
		}
	}

	trace_fib6_table_lookup(net, f6i, table, fl6);

	return f6i;
}

/* Core policy-routing resolution used by both input and output paths.
 * Resolves @fl6 in @table and returns a referenced rt6_info: a cached
 * exception, an uncached RTF_CACHE clone (KNOWN_NH special case), or a
 * per-cpu copy of the fib entry.
 */
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
			       int oif, struct flowi6 *fl6,
			       const struct sk_buff *skb, int flags)
{
	struct fib6_info *f6i;
	struct rt6_info *rt;
	int strict = 0;

	strict |= flags & RT6_LOOKUP_F_IFACE;
	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
	/* with forwarding disabled, prefer (probably) reachable routers */
	if (net->ipv6.devconf_all->forwarding == 0)
		strict |= RT6_LOOKUP_F_REACHABLE;

	rcu_read_lock();

	f6i = fib6_table_lookup(net, table, oif, fl6, strict);
	if (f6i->fib6_nsiblings)
		f6i = fib6_multipath_select(net, f6i, fl6, oif, skb, strict);

	if (f6i == net->ipv6.fib6_null_entry) {
		rt = net->ipv6.ip6_null_entry;
		rcu_read_unlock();
		dst_hold(&rt->dst);
		return rt;
	}

	/*Search through exception table */
	rt = rt6_find_cached_rt(f6i, &fl6->daddr, &fl6->saddr);
	if (rt) {
		if (ip6_hold_safe(net, &rt, true))
			dst_use_noref(&rt->dst, jiffies);

		rcu_read_unlock();
		return rt;
	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
			    !(f6i->fib6_flags & RTF_GATEWAY))) {
		/* Create a RTF_CACHE clone which will not be
		 * owned by the fib6 tree.  It is for the special case where
		 * the daddr in the skb during the neighbor look-up is different
		 * from the fl6->daddr used to look-up route here.
		 */
		struct rt6_info *uncached_rt;

		uncached_rt = ip6_rt_cache_alloc(f6i, &fl6->daddr, NULL);

		rcu_read_unlock();

		if (uncached_rt) {
			/* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc()
			 * No need for another dst_hold()
			 */
			rt6_uncached_list_add(uncached_rt);
			atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
		} else {
			uncached_rt = net->ipv6.ip6_null_entry;
			dst_hold(&uncached_rt->dst);
		}

		return uncached_rt;
	} else {
		/* Get a percpu copy */

		struct rt6_info *pcpu_rt;

		/* BH must stay disabled around the per-cpu slot access */
		local_bh_disable();
		pcpu_rt = rt6_get_pcpu_route(f6i);

		if (!pcpu_rt)
			pcpu_rt = rt6_make_pcpu_route(net, f6i);

		local_bh_enable();
		rcu_read_unlock();

		return pcpu_rt;
	}
}
EXPORT_SYMBOL_GPL(ip6_pol_route);
L
Linus Torvalds 已提交
1947

D
David Ahern 已提交
1948 1949 1950 1951 1952
/* Input-path wrapper for ip6_pol_route(): keys the lookup by the
 * incoming interface (flowi6_iif).
 */
static struct rt6_info *ip6_pol_route_input(struct net *net,
					    struct fib6_table *table,
					    struct flowi6 *fl6,
					    const struct sk_buff *skb,
					    int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
}

1957 1958
/* Resolve an input route for @fl6 arriving on @dev.  Link-local /
 * multicast destinations force strict interface matching, except on
 * PIM register pseudo-devices.
 */
struct dst_entry *ip6_route_input_lookup(struct net *net,
					 struct net_device *dev,
					 struct flowi6 *fl6,
					 const struct sk_buff *skb,
					 int flags)
{
	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
		flags |= RT6_LOOKUP_F_IFACE;

	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
}
EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
1969

1970
/* Fill @keys with the L3 fields used for multipath hashing.  For ICMPv6
 * error messages the embedded (inner) header is hashed instead, so the
 * error follows the same path as the flow that triggered it; in that
 * case any pre-dissected @flkeys are ignored.
 */
static void ip6_multipath_l3_keys(const struct sk_buff *skb,
				  struct flow_keys *keys,
				  struct flow_keys *flkeys)
{
	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
	const struct ipv6hdr *key_iph = outer_iph;
	struct flow_keys *_flkeys = flkeys;
	const struct ipv6hdr *inner_iph;
	const struct icmp6hdr *icmph;
	struct ipv6hdr _inner_iph;
	struct icmp6hdr _icmph;

	if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
		goto out;

	icmph = skb_header_pointer(skb, skb_transport_offset(skb),
				   sizeof(_icmph), &_icmph);
	if (!icmph)
		goto out;

	/* only ICMPv6 errors embed the offending packet's header */
	if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
	    icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
	    icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
	    icmph->icmp6_type != ICMPV6_PARAMPROB)
		goto out;

	inner_iph = skb_header_pointer(skb,
				       skb_transport_offset(skb) + sizeof(*icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
	_flkeys = NULL;
out:
	if (_flkeys) {
		keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
		keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
		keys->tags.flow_label = _flkeys->tags.flow_label;
		keys->basic.ip_proto = _flkeys->basic.ip_proto;
	} else {
		keys->addrs.v6addrs.src = key_iph->saddr;
		keys->addrs.v6addrs.dst = key_iph->daddr;
		keys->tags.flow_label = ip6_flowlabel(key_iph);
		keys->basic.ip_proto = key_iph->nexthdr;
	}
}

/* if skb is set it will be used and fl6 can be NULL */
u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
		       const struct sk_buff *skb, struct flow_keys *flkeys)
{
	struct flow_keys hash_keys;
	u32 mhash;

	/* policy 0: hash on L3 only; policy 1: include L4 ports */
	switch (ip6_multipath_hash_policy(net)) {
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (skb) {
			ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
		} else {
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		break;
	case 1:
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;

			memset(&hash_keys, 0, sizeof(hash_keys));

			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, flag);
				flkeys = &keys;
			}
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
			hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
			hash_keys.ports.src = flkeys->ports.src;
			hash_keys.ports.dst = flkeys->ports.dst;
			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.ports.src = fl6->fl6_sport;
			hash_keys.ports.dst = fl6->fl6_dport;
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		break;
	}
	mhash = flow_hash_from_keys(&hash_keys);

	/* top bit is reserved (matches the skb->l4_hash shortcut above) */
	return mhash >> 1;
}

T
Thomas Graf 已提交
2075 2076
/* ip6_route_input - attach a route (dst) to an incoming IPv6 packet.
 *
 * Builds a flowi6 from the packet headers, propagates any RX tunnel key,
 * optionally pre-dissects flow keys for FIB rules, and stores the lookup
 * result via skb_dst_set().  For ICMPv6 a multipath hash is computed up
 * front so errors follow the same path as the flow they relate to.
 */
void ip6_route_input(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip_tunnel_info *tun_info;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
	};
	struct flow_keys *flkeys = NULL, _flkeys;

	/* Carry the RX tunnel id into the lookup (metadata-based tunnels). */
	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;

	if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
		flkeys = &_flkeys;

	/* ICMPv6 errors must hash like the flow they refer to so they
	 * reach the same nexthop in an ECMP setup.
	 */
	if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
		fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);

	skb_dst_drop(skb);
	skb_dst_set(skb,
		    ip6_route_input_lookup(net, skb->dev, &fl6, skb, flags));
}

D
David Ahern 已提交
2105 2106 2107 2108 2109
/* Policy-routing callback for output lookups: delegate to ip6_pol_route()
 * using the flow's output interface.
 */
static struct rt6_info *ip6_pol_route_output(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
}

2114 2115
/* ip6_route_output_flags - route lookup for locally generated traffic.
 * @net:   netns to do the lookup in
 * @sk:    originating socket, may be NULL
 * @fl6:   flow description (daddr/saddr/oif/...)
 * @flags: RT6_LOOKUP_F_* flags to add to the lookup
 *
 * Handles link-local/multicast destinations via l3mdev first, then runs
 * the FIB rules lookup.  Returns a dst_entry (possibly an error dst);
 * never NULL.
 */
struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
					 struct flowi6 *fl6, int flags)
{
	bool any_src;

	/* Strict-scope destinations (link-local/mcast) may be owned by an
	 * L3 master device; let it short-circuit the lookup.
	 */
	if (rt6_need_strict(&fl6->daddr)) {
		struct dst_entry *dst;

		dst = l3mdev_link_scope_lookup(net, fl6);
		if (dst)
			return dst;
	}

	fl6->flowi6_iif = LOOPBACK_IFINDEX;

	any_src = ipv6_addr_any(&fl6->saddr);
	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
	    (fl6->flowi6_oif && any_src))
		flags |= RT6_LOOKUP_F_IFACE;

	if (!any_src)
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	else if (sk)
		/* Apply the socket's source-address selection preferences. */
		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

	return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags);
L
Linus Torvalds 已提交
2142

2143
/* ip6_blackhole_route - clone @dst_orig into a "blackhole" dst.
 *
 * Used by xfrm when a route must be held but no packets may be sent yet:
 * the copy discards all input/output while keeping metrics, gateway and
 * prefix information from the original.  Consumes a reference on
 * @dst_orig and returns the new dst or ERR_PTR(-ENOMEM).
 */
struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
	struct net_device *loopback_dev = net->loopback_dev;
	struct dst_entry *new = NULL;

	rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
		       DST_OBSOLETE_DEAD, 0);
	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);

		new = &rt->dst;
		new->__use = 1;
		/* Both directions are discarded: this dst only parks state. */
		new->input = dst_discard;
		new->output = dst_discard_out;

		dst_copy_metrics(new, &ort->dst);

		rt->rt6i_idev = in6_dev_get(loopback_dev);
		rt->rt6i_gateway = ort->rt6i_gateway;
		/* RTF_PCPU is a per-cpu-copy marker and must not be cloned. */
		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;

		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
	}

	dst_release(dst_orig);
	return new ? new : ERR_PTR(-ENOMEM);
}

L
Linus Torvalds 已提交
2176 2177 2178 2179
/*
 *	Destination cache support functions
 */

2180
/* Validate a fib6_info against a dst cookie: the entry must still be
 * linked into the FIB (cookie matches) and must not have expired.
 */
static bool fib6_check(struct fib6_info *f6i, u32 cookie)
{
	u32 rt_cookie = 0;

	return fib6_get_cookie_safe(f6i, &rt_cookie) &&
	       rt_cookie == cookie &&
	       !fib6_check_expired(f6i);
}

2193 2194 2195
/* rt6_check - validate a cached rt6_info against the caller's cookie.
 * @rt:     dst being checked
 * @from:   fib6_info the dst was derived from (RCU-protected, may be NULL)
 * @cookie: cookie the socket stored when it cached the dst
 *
 * Returns &rt->dst if still valid, NULL if the caller must relookup.
 *
 * Fix: a dst whose @from has already been detached (NULL) must be
 * invalidated.  Previously a NULL @from left rt_cookie at 0, so any
 * caller holding cookie 0 would keep using a dst that no longer has a
 * backing FIB entry.  Mainline later adopted the same "!from ||" guard.
 */
static struct dst_entry *rt6_check(struct rt6_info *rt,
				   struct fib6_info *from,
				   u32 cookie)
{
	u32 rt_cookie = 0;

	if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
	    rt_cookie != cookie)
		return NULL;

	if (rt6_check_expired(rt))
		return NULL;

	return &rt->dst;
}

2209 2210 2211
/* Validate a per-cpu/uncached dst: it is usable only while it has not
 * expired itself, is still marked for forced validation, and its parent
 * fib6_info passes the cookie check.
 */
static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
					    struct fib6_info *from,
					    u32 cookie)
{
	if (__rt6_check_expired(rt))
		return NULL;
	if (rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK)
		return NULL;
	if (!fib6_check(from, cookie))
		return NULL;

	return &rt->dst;
}

L
Linus Torvalds 已提交
2221 2222
/* dst_ops::check hook: decide whether a cached dst may still be used.
 * Returns the dst if valid, NULL to force the caller to relookup.
 */
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct dst_entry *dst_ret;
	struct fib6_info *from;
	struct rt6_info *rt;

	rt = container_of(dst, struct rt6_info, dst);

	/* rt->from is RCU-protected; hold the read lock across the checks. */
	rcu_read_lock();

	/* All IPV6 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 */

	from = rcu_dereference(rt->from);

	/* Per-cpu copies and uncached entries are validated against their
	 * parent fib6_info; plain cached routes go through rt6_check().
	 */
	if (from && (rt->rt6i_flags & RTF_PCPU ||
	    unlikely(!list_empty(&rt->rt6i_uncached))))
		dst_ret = rt6_dst_from_check(rt, from, cookie);
	else
		dst_ret = rt6_check(rt, from, cookie);

	rcu_read_unlock();

	return dst_ret;
}

/* dst_ops::negative_advice hook: called when a socket reports trouble
 * with its cached route.  Expired exception-table (RTF_CACHE) entries
 * are removed; any other dst is simply released so the socket relookups.
 * Returns the (possibly NULL) dst the socket should keep.
 */
static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *) dst;

	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			/* RCU protects the exception-table traversal done
			 * inside rt6_remove_exception_rt().
			 */
			rcu_read_lock();
			if (rt6_check_expired(rt)) {
				rt6_remove_exception_rt(rt);
				dst = NULL;
			}
			rcu_read_unlock();
		} else {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

/* dst_ops::link_failure hook: the neighbour for this route is unusable.
 * Send an unreachable error back, drop cached exception entries, and for
 * default routes bump the fib node's serial number so cached dsts are
 * invalidated on their next ip6_dst_check().
 */
static void ip6_link_failure(struct sk_buff *skb)
{
	struct rt6_info *rt;

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);

	rt = (struct rt6_info *) skb_dst(skb);
	if (rt) {
		rcu_read_lock();
		if (rt->rt6i_flags & RTF_CACHE) {
			rt6_remove_exception_rt(rt);
		} else {
			struct fib6_info *from;
			struct fib6_node *fn;

			from = rcu_dereference(rt->from);
			if (from) {
				fn = rcu_dereference(from->fib6_node);
				/* -1 can never match a cookie, forcing
				 * revalidation of every dst on this node.
				 */
				if (fn && (rt->rt6i_flags & RTF_DEFAULT))
					fn->fn_sernum = -1;
			}
		}
		rcu_read_unlock();
	}
}

2295 2296
/* Arm (or re-arm) the expiry timer of a dst-based route.
 * @timeout is in jiffies from now.  If the dst did not expire before,
 * inherit the parent fib6_info's expiry first so dst_set_expires() can
 * only move the deadline closer, never extend it.
 */
static void rt6_update_expires(struct rt6_info *rt0, int timeout)
{
	if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
		struct fib6_info *from;

		rcu_read_lock();
		from = rcu_dereference(rt0->from);
		if (from)
			rt0->dst.expires = from->expires;
		rcu_read_unlock();
	}

	dst_set_expires(&rt0->dst, timeout);
	rt0->rt6i_flags |= RTF_EXPIRES;
}

2311 2312 2313 2314
/* Record a newly learned path MTU on @rt, mark the route as modified and
 * arm the expiry timer so the learned value ages out after the per-netns
 * ip6_rt_mtu_expires interval.
 */
static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
{
	dst_metric_set(&rt->dst, RTAX_MTU, mtu);
	rt->rt6i_flags |= RTF_MODIFIED;
	rt6_update_expires(rt,
			   dev_net(rt->dst.dev)->ipv6.sysctl.ip6_rt_mtu_expires);
}

2320 2321
static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
{
2322 2323 2324 2325 2326 2327
	bool from_set;

	rcu_read_lock();
	from_set = !!rcu_dereference(rt->from);
	rcu_read_unlock();

2328
	return !(rt->rt6i_flags & RTF_CACHE) &&
2329
		(rt->rt6i_flags & RTF_PCPU || from_set);
2330 2331
}

2332 2333
/* __ip6_rt_update_pmtu - apply a learned path MTU to a route.
 * @dst: route the MTU applies to
 * @sk:  socket the update came in for, may be NULL
 * @iph: IPv6 header of the packet that triggered the update, may be NULL
 * @mtu: new path MTU (host byte order)
 *
 * Smaller-than-minimum MTUs are clamped to IPV6_MIN_MTU and updates that
 * would not shrink the current MTU are ignored.  Depending on the route
 * type the value is either stored directly or in a cloned exception
 * entry keyed by destination (and source, with subtrees).
 *
 * Fix: rt6->from can be detached concurrently; dereferencing a NULL
 * 'from' in ip6_rt_cache_alloc() would crash.  Bail out under RCU when
 * 'from' is gone, as done upstream ("ipv6: fix possible crash in
 * __ip6_rt_update_pmtu()").
 */
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
				 const struct ipv6hdr *iph, u32 mtu)
{
	const struct in6_addr *daddr, *saddr;
	struct rt6_info *rt6 = (struct rt6_info *)dst;

	if (dst_metric_locked(dst, RTAX_MTU))
		return;

	if (iph) {
		daddr = &iph->daddr;
		saddr = &iph->saddr;
	} else if (sk) {
		daddr = &sk->sk_v6_daddr;
		saddr = &inet6_sk(sk)->saddr;
	} else {
		daddr = NULL;
		saddr = NULL;
	}
	dst_confirm_neigh(dst, daddr);
	mtu = max_t(u32, mtu, IPV6_MIN_MTU);
	if (mtu >= dst_mtu(dst))
		return;

	if (!rt6_cache_allowed_for_pmtu(rt6)) {
		rt6_do_update_pmtu(rt6, mtu);
		/* update rt6_ex->stamp for cache */
		if (rt6->rt6i_flags & RTF_CACHE)
			rt6_update_exception_stamp_rt(rt6);
	} else if (daddr) {
		struct fib6_info *from;
		struct rt6_info *nrt6;

		rcu_read_lock();
		from = rcu_dereference(rt6->from);
		if (!from) {
			/* Route was removed under us; nothing to clone. */
			rcu_read_unlock();
			return;
		}
		nrt6 = ip6_rt_cache_alloc(from, daddr, saddr);
		if (nrt6) {
			rt6_do_update_pmtu(nrt6, mtu);
			if (rt6_insert_exception(nrt6, from))
				dst_release_immediate(&nrt6->dst);
		}
		rcu_read_unlock();
	}
}

2377 2378 2379 2380 2381 2382
/* dst_ops::update_pmtu hook: pick the IPv6 header out of the triggering
 * packet (if any) and defer to the common helper.
 */
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu)
{
	const struct ipv6hdr *iph = NULL;

	if (skb)
		iph = ipv6_hdr(skb);

	__ip6_rt_update_pmtu(dst, sk, iph, mtu);
}

2383
/* ip6_update_pmtu - handle a Packet Too Big notification for a packet.
 * @skb:  packet whose embedded IPv6 header identifies the flow
 * @net:  netns for the route lookup
 * @mtu:  reported MTU, network byte order
 * @oif:  output interface hint (0 for none)
 * @mark: skb mark to use; 0 selects the netns reply-mark policy
 * @uid:  owning user, part of the route key
 */
void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
		     int oif, u32 mark, kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_uid = uid,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (!dst->error)
		__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_update_pmtu);

/* ip6_sk_update_pmtu - socket-aware wrapper around ip6_update_pmtu().
 * After applying the PMTU update, revalidate the socket's cached dst and
 * refresh it for connected datagram sockets whose route went stale.
 */
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
	int oif = sk->sk_bound_dev_if;
	struct dst_entry *dst;

	/* Unbound sockets on an L3 slave inherit the master's ifindex. */
	if (!oif && skb->dev)
		oif = l3mdev_master_ifindex(skb->dev);

	ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);

	dst = __sk_dst_get(sk);
	if (!dst || !dst->obsolete ||
	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
		return;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		ip6_datagram_dst_update(sk, false);
	bh_unlock_sock(sk);
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);

2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443
/* ip6_sk_dst_store_flow - cache @dst on @sk for the flow described by @fl6.
 * The destination (and, with subtrees, source) address is recorded only
 * when it matches the socket's own addresses, so the cached route is not
 * reused for a different flow.
 */
void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
			   const struct flowi6 *fl6)
{
#ifdef CONFIG_IPV6_SUBTREES
	struct ipv6_pinfo *np = inet6_sk(sk);
#endif

	ip6_dst_store(sk, dst,
		      ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
		      &sk->sk_v6_daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
		      ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
		      &np->saddr :
#endif
		      NULL);
}

2444 2445 2446 2447 2448 2449 2450 2451 2452
/* Handle redirects */
struct ip6rd_flowi {
	struct flowi6 fl6;
	struct in6_addr gateway;	/* router that sent the redirect */
};

/* __ip6_route_redirect - find the route an ICMPv6 redirect applies to.
 *
 * Walks the fib node for the destination looking for a gateway route via
 * the interface the redirect arrived on whose nexthop matches the
 * redirecting router; exception-table clones are consulted when the
 * parent's gateway differs.  Returns a held rt6_info (possibly the
 * null entry) — never NULL.
 */
static struct rt6_info *__ip6_route_redirect(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
	struct rt6_info *ret = NULL, *rt_cache;
	struct fib6_info *rt;
	struct fib6_node *fn;

	/* Get the "current" route for this destination and
	 * check if the redirect has come from appropriate router.
	 *
	 * RFC 4861 specifies that redirects should only be
	 * accepted if they come from the nexthop to the target.
	 * Due to the way the routes are chosen, this notion
	 * is a bit fuzzy and one might need to check all possible
	 * routes.
	 */

	rcu_read_lock();
	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	for_each_fib6_node_rt_rcu(fn) {
		if (rt->fib6_nh.nh_flags & RTNH_F_DEAD)
			continue;
		if (fib6_check_expired(rt))
			continue;
		if (rt->fib6_flags & RTF_REJECT)
			break;
		if (!(rt->fib6_flags & RTF_GATEWAY))
			continue;
		if (fl6->flowi6_oif != rt->fib6_nh.nh_dev->ifindex)
			continue;
		/* rt_cache's gateway might be different from its 'parent'
		 * in the case of an ip redirect.
		 * So we keep searching in the exception table if the gateway
		 * is different.
		 */
		if (!ipv6_addr_equal(&rdfl->gateway, &rt->fib6_nh.nh_gw)) {
			rt_cache = rt6_find_cached_rt(rt,
						      &fl6->daddr,
						      &fl6->saddr);
			if (rt_cache &&
			    ipv6_addr_equal(&rdfl->gateway,
					    &rt_cache->rt6i_gateway)) {
				ret = rt_cache;
				break;
			}
			continue;
		}
		break;
	}

	if (!rt)
		rt = net->ipv6.fib6_null_entry;
	else if (rt->fib6_flags & RTF_REJECT) {
		ret = net->ipv6.ip6_null_entry;
		goto out;
	}

	/* No match at this node: back up the tree and retry. */
	if (rt == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}

out:
	if (ret)
		ip6_hold_safe(net, &ret, true);
	else
		/* Materialize a dst for the fib entry we matched. */
		ret = ip6_create_rt_rcu(rt);

	rcu_read_unlock();

	trace_fib6_table_lookup(net, rt, table, fl6);
	return ret;
};

static struct dst_entry *ip6_route_redirect(struct net *net,
D
David Ahern 已提交
2531 2532 2533
					    const struct flowi6 *fl6,
					    const struct sk_buff *skb,
					    const struct in6_addr *gateway)
2534 2535 2536 2537 2538 2539 2540
{
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip6rd_flowi rdfl;

	rdfl.fl6 = *fl6;
	rdfl.gateway = *gateway;

D
David Ahern 已提交
2541
	return fib6_rule_lookup(net, &rdfl.fl6, skb,
2542 2543 2544
				flags, __ip6_route_redirect);
}

2545 2546
/* ip6_redirect - process an ICMPv6 redirect for the packet in @skb.
 * @skb:  packet whose data points at the embedded IPv6 header
 * @net:  netns to look the route up in
 * @oif:  interface the redirect arrived on
 * @mark: skb mark used as part of the route key
 * @uid:  owning user, part of the route key
 */
void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
		  kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_iif = LOOPBACK_IFINDEX,
		.flowi6_oif = oif,
		.flowi6_mark = mark,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_uid = uid,
	};

	dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_redirect);

2567 2568 2569 2570 2571 2572 2573 2574 2575
/* ip6_redirect_no_header - handle a redirect whose ICMPv6 payload does
 * not carry the offending packet's header; the flow is reconstructed
 * from the redirect message itself (target in msg->dest, router in the
 * outer header's daddr).
 */
void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
			    u32 mark)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_iif = LOOPBACK_IFINDEX,
		.flowi6_oif = oif,
		.flowi6_mark = mark,
		.daddr = msg->dest,
		.saddr = iph->daddr,
		.flowi6_uid = sock_net_uid(net, NULL),
	};

	dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}

2588 2589
/* Socket-context wrapper: process a redirect using the socket's bound
 * device, mark and uid as the route key.
 */
void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
		     sk->sk_uid);
}
EXPORT_SYMBOL_GPL(ip6_sk_redirect);

2595
/* dst_ops::default_advmss hook: derive the advertised TCP MSS from the
 * route MTU, clamped between the per-netns minimum and the value implied
 * by a maximal non-jumbo payload.
 */
static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;
	unsigned int mtu = dst_mtu(dst);
	struct net *net = dev_net(dev);

	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);

	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;

	/*
	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
	 * IPV6_MAXPLEN is also valid and means: "any MSS,
	 * rely only on pmtu discovery"
	 */
	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
		mtu = IPV6_MAXPLEN;
	return mtu;
}

2617
/* dst_ops::mtu hook: prefer an explicit RTAX_MTU metric, fall back to
 * the egress device's IPv6 MTU, cap at IP6_MAX_MTU, and subtract any
 * lwtunnel encapsulation headroom.
 */
static unsigned int ip6_mtu(const struct dst_entry *dst)
{
	struct inet6_dev *idev;
	unsigned int mtu;

	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (mtu)
		goto out;

	mtu = IPV6_MIN_MTU;

	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

out:
	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687
/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 *
 * based on ip6_dst_mtu_forward and exception logic of
 * rt6_find_cached_rt; called with rcu_read_lock
 */
u32 ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
		      struct in6_addr *saddr)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct in6_addr *src_key;
	struct inet6_dev *idev;
	u32 mtu = 0;

	if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
		mtu = f6i->fib6_pmtu;
		if (mtu)
			goto out;
	}

	/* Exceptions are keyed by source only for source-specific routes. */
	src_key = NULL;
#ifdef CONFIG_IPV6_SUBTREES
	if (f6i->fib6_src.plen)
		src_key = saddr;
#endif

	bucket = rcu_dereference(f6i->rt6i_exception_bucket);
	rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
	if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
		mtu = dst_metric_raw(&rt6_ex->rt6i->dst, RTAX_MTU);

	if (likely(!mtu)) {
		struct net_device *dev = fib6_info_nh_dev(f6i);

		mtu = IPV6_MIN_MTU;
		idev = __in6_dev_get(dev);
		if (idev && idev->cnf.mtu6 > mtu)
			mtu = idev->cnf.mtu6;
	}

	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
out:
	return mtu - lwtunnel_headroom(fib6_info_nh_lwt(f6i), mtu);
}

2688
/* icmp6_dst_alloc - build a standalone dst for sending an ICMPv6/ND
 * packet to @fl6->daddr via @dev, bypassing the routing table.
 * The dst is not attached to any fib entry; it is placed on the uncached
 * list so device teardown can release it.  Returns a dst (possibly an
 * xfrm bundle) or ERR_PTR() on failure.
 */
struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
				  struct flowi6 *fl6)
{
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct inet6_dev *idev = in6_dev_get(dev);
	struct net *net = dev_net(dev);

	if (unlikely(!idev))
		return ERR_PTR(-ENODEV);

	rt = ip6_dst_alloc(net, dev, 0);
	if (unlikely(!rt)) {
		in6_dev_put(idev);
		dst = ERR_PTR(-ENOMEM);
		goto out;
	}

	rt->dst.flags |= DST_HOST;
	rt->dst.input = ip6_input;
	rt->dst.output  = ip6_output;
	rt->rt6i_gateway  = fl6->daddr;
	rt->rt6i_dst.addr = fl6->daddr;
	rt->rt6i_dst.plen = 128;
	rt->rt6i_idev     = idev;
	/* Hop limit 0 here means "use the device/netns default". */
	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);

	/* Add this dst into uncached_list so that rt6_disable_ip() can
	 * do proper release of the net_device
	 */
	rt6_uncached_list_add(rt);
	atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);

	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);

out:
	return dst;
}

2727
/* dst_ops::gc hook: garbage-collect IPv6 dst entries.
 * Skips work while under the entry limit and inside the minimum GC
 * interval; otherwise runs the fib GC with an adaptive expiry that grows
 * under pressure and decays by the elasticity factor each call.
 * Returns nonzero when the table is still over the size limit.
 */
static int ip6_dst_gc(struct dst_ops *ops)
{
	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
	int entries;

	entries = dst_entries_get_fast(ops);
	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
	    entries <= rt_max_size)
		goto out;

	net->ipv6.ip6_rt_gc_expire++;
	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
	entries = dst_entries_get_slow(ops);
	if (entries < ops->gc_thresh)
		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
out:
	/* Exponential decay of the GC aggressiveness. */
	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
	return entries > rt_max_size;
}

2752
static int ip6_convert_metrics(struct net *net, struct fib6_info *rt,
2753
			       struct fib6_config *cfg)
2754
{
2755
	struct dst_metrics *p;
2756

2757
	if (!cfg->fc_mx)
2758 2759
		return 0;

2760 2761
	p = kzalloc(sizeof(*rt->fib6_metrics), GFP_KERNEL);
	if (unlikely(!p))
2762 2763
		return -ENOMEM;

2764 2765
	refcount_set(&p->refcnt, 1);
	rt->fib6_metrics = p;
2766

2767
	return ip_metrics_convert(net, cfg->fc_mx, cfg->fc_mx_len, p->metrics);
2768
}
L
Linus Torvalds 已提交
2769

2770 2771
/* ip6_nh_lookup_table - resolve a nexthop gateway within one table.
 * Used while validating a new route's gateway.  Returns a held rt6_info
 * on a useful match, or NULL when the table is missing or only the null
 * entry matched (caller falls back to a full lookup).
 */
static struct rt6_info *ip6_nh_lookup_table(struct net *net,
					    struct fib6_config *cfg,
					    const struct in6_addr *gw_addr,
					    u32 tbid, int flags)
{
	struct flowi6 fl6 = {
		.flowi6_oif = cfg->fc_ifindex,
		.daddr = *gw_addr,
		.saddr = cfg->fc_prefsrc,
	};
	struct fib6_table *table;
	struct rt6_info *rt;

	table = fib6_get_table(net, tbid);
	if (!table)
		return NULL;

	if (!ipv6_addr_any(&cfg->fc_prefsrc))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	/* The gateway may sit behind a link that is currently down. */
	flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
	rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, NULL, flags);

	/* if table lookup failed, fall back to full lookup */
	if (rt == net->ipv6.ip6_null_entry) {
		ip6_rt_put(rt);
		rt = NULL;
	}

	return rt;
}

2802 2803
/* ip6_route_check_nh_onlink - validate an RTNH_F_ONLINK nexthop.
 * An onlink gateway must not resolve (in the device's table) to a local,
 * anycast or reject route, nor to a different egress device.  Returns 0
 * if acceptable, -EINVAL (with extack message) otherwise.
 */
static int ip6_route_check_nh_onlink(struct net *net,
				     struct fib6_config *cfg,
				     const struct net_device *dev,
				     struct netlink_ext_ack *extack)
{
	u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
	struct rt6_info *grt;
	int err;

	err = 0;
	grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
	if (grt) {
		if (!grt->dst.error &&
		    /* ignore match if it is the default route */
		    grt->from && !ipv6_addr_any(&grt->from->fib6_dst.addr) &&
		    (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
			NL_SET_ERR_MSG(extack,
				       "Nexthop has invalid gateway or device mismatch");
			err = -EINVAL;
		}

		ip6_rt_put(grt);
	}

	return err;
}

2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841
/* ip6_route_check_nh - verify a gateway is reachable for a new route.
 * Looks the gateway up (scoped to cfg->fc_table first, then globally)
 * and checks that it is directly reachable (not itself behind another
 * gateway) on the requested device.  When no device was given, the one
 * found by the lookup is handed back through @_dev/@idev with references
 * taken.  Returns 0 on success, -EHOSTUNREACH otherwise.
 */
static int ip6_route_check_nh(struct net *net,
			      struct fib6_config *cfg,
			      struct net_device **_dev,
			      struct inet6_dev **idev)
{
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	struct net_device *dev = _dev ? *_dev : NULL;
	struct rt6_info *grt = NULL;
	int err = -EHOSTUNREACH;

	if (cfg->fc_table) {
		int flags = RT6_LOOKUP_F_IFACE;

		grt = ip6_nh_lookup_table(net, cfg, gw_addr,
					  cfg->fc_table, flags);
		if (grt) {
			/* A gateway behind another gateway, or on the wrong
			 * device, is not a usable nexthop.
			 */
			if (grt->rt6i_flags & RTF_GATEWAY ||
			    (dev && dev != grt->dst.dev)) {
				ip6_rt_put(grt);
				grt = NULL;
			}
		}
	}

	if (!grt)
		grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, NULL, 1);

	if (!grt)
		goto out;

	if (dev) {
		if (dev != grt->dst.dev) {
			ip6_rt_put(grt);
			goto out;
		}
	} else {
		/* Adopt the device/idev found by the lookup. */
		*_dev = dev = grt->dst.dev;
		*idev = grt->rt6i_idev;
		dev_hold(dev);
		in6_dev_hold(grt->rt6i_idev);
	}

	if (!(grt->rt6i_flags & RTF_GATEWAY))
		err = 0;

	ip6_rt_put(grt);

out:
	return err;
}

2882 2883 2884 2885 2886 2887
/* ip6_validate_gw - full validation of a new route's gateway and device.
 * Rejects gateways that are local addresses, enforces the link-local
 * nexthop rule (with the RFC 4798 IPv4-mapped exception), resolves the
 * egress device when none was given, and rejects loopback egress.
 * On success @_dev/@idev identify the egress device.  Returns 0 or a
 * negative errno with an extack message set.
 */
static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
			   struct net_device **_dev, struct inet6_dev **idev,
			   struct netlink_ext_ack *extack)
{
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	int gwa_type = ipv6_addr_type(gw_addr);
	/* Link-local gateways are checked against the device itself. */
	bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
	const struct net_device *dev = *_dev;
	bool need_addr_check = !dev;
	int err = -EINVAL;

	/* if gw_addr is local we will fail to detect this in case
	 * address is still TENTATIVE (DAD in progress). rt6_lookup()
	 * will return already-added prefix route via interface that
	 * prefix route was assigned to, which might be non-loopback.
	 */
	if (dev &&
	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
		goto out;
	}

	if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
		/* IPv6 strictly inhibits using not link-local
		 * addresses as nexthop address.
		 * Otherwise, router will not able to send redirects.
		 * It is very good, but in some (rare!) circumstances
		 * (SIT, PtP, NBMA NOARP links) it is handy to allow
		 * some exceptions. --ANK
		 * We allow IPv4-mapped nexthops to support RFC4798-type
		 * addressing
		 */
		if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
			NL_SET_ERR_MSG(extack, "Invalid gateway address");
			goto out;
		}

		if (cfg->fc_flags & RTNH_F_ONLINK)
			err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
		else
			err = ip6_route_check_nh(net, cfg, _dev, idev);

		if (err)
			goto out;
	}

	/* reload in case device was changed */
	dev = *_dev;

	err = -EINVAL;
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Egress device not specified");
		goto out;
	} else if (dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack,
			       "Egress device can not be loopback device for this route");
		goto out;
	}

	/* if we did not check gw_addr above, do so now that the
	 * egress device has been resolved.
	 */
	if (need_addr_check &&
	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
		goto out;
	}

	err = 0;
out:
	return err;
}

2955
static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
2956
					      gfp_t gfp_flags,
2957
					      struct netlink_ext_ack *extack)
L
Linus Torvalds 已提交
2958
{
2959
	struct net *net = cfg->fc_nlinfo.nl_net;
2960
	struct fib6_info *rt = NULL;
L
Linus Torvalds 已提交
2961 2962
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;
T
Thomas Graf 已提交
2963
	struct fib6_table *table;
L
Linus Torvalds 已提交
2964
	int addr_type;
2965
	int err = -EINVAL;
L
Linus Torvalds 已提交
2966

2967
	/* RTF_PCPU is an internal flag; can not be set by userspace */
2968 2969
	if (cfg->fc_flags & RTF_PCPU) {
		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
2970
		goto out;
2971
	}
2972

2973 2974 2975 2976 2977 2978
	/* RTF_CACHE is an internal flag; can not be set by userspace */
	if (cfg->fc_flags & RTF_CACHE) {
		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
		goto out;
	}

2979 2980 2981 2982 2983
	if (cfg->fc_type > RTN_MAX) {
		NL_SET_ERR_MSG(extack, "Invalid route type");
		goto out;
	}

2984 2985 2986 2987 2988 2989
	if (cfg->fc_dst_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid prefix length");
		goto out;
	}
	if (cfg->fc_src_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid source address length");
2990
		goto out;
2991
	}
L
Linus Torvalds 已提交
2992
#ifndef CONFIG_IPV6_SUBTREES
2993 2994 2995
	if (cfg->fc_src_len) {
		NL_SET_ERR_MSG(extack,
			       "Specifying source address requires IPV6_SUBTREES to be enabled");
2996
		goto out;
2997
	}
L
Linus Torvalds 已提交
2998
#endif
2999
	if (cfg->fc_ifindex) {
L
Linus Torvalds 已提交
3000
		err = -ENODEV;
3001
		dev = dev_get_by_index(net, cfg->fc_ifindex);
L
Linus Torvalds 已提交
3002 3003 3004 3005 3006 3007 3008
		if (!dev)
			goto out;
		idev = in6_dev_get(dev);
		if (!idev)
			goto out;
	}

3009 3010
	if (cfg->fc_metric == 0)
		cfg->fc_metric = IP6_RT_PRIO_USER;
L
Linus Torvalds 已提交
3011

3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026
	if (cfg->fc_flags & RTNH_F_ONLINK) {
		if (!dev) {
			NL_SET_ERR_MSG(extack,
				       "Nexthop device required for onlink");
			err = -ENODEV;
			goto out;
		}

		if (!(dev->flags & IFF_UP)) {
			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
			err = -ENETDOWN;
			goto out;
		}
	}

3027
	err = -ENOBUFS;
3028 3029
	if (cfg->fc_nlinfo.nlh &&
	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
3030
		table = fib6_get_table(net, cfg->fc_table);
3031
		if (!table) {
3032
			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
3033 3034 3035 3036 3037
			table = fib6_new_table(net, cfg->fc_table);
		}
	} else {
		table = fib6_new_table(net, cfg->fc_table);
	}
3038 3039

	if (!table)
T
Thomas Graf 已提交
3040 3041
		goto out;

3042 3043 3044
	err = -ENOMEM;
	rt = fib6_info_alloc(gfp_flags);
	if (!rt)
L
Linus Torvalds 已提交
3045
		goto out;
3046 3047 3048

	if (cfg->fc_flags & RTF_ADDRCONF)
		rt->dst_nocount = true;
L
Linus Torvalds 已提交
3049

3050 3051
	err = ip6_convert_metrics(net, rt, cfg);
	if (err < 0)
L
Linus Torvalds 已提交
3052 3053
		goto out;

3054
	if (cfg->fc_flags & RTF_EXPIRES)
3055
		fib6_set_expires(rt, jiffies +
3056 3057
				clock_t_to_jiffies(cfg->fc_expires));
	else
3058
		fib6_clean_expires(rt);
L
Linus Torvalds 已提交
3059

3060 3061
	if (cfg->fc_protocol == RTPROT_UNSPEC)
		cfg->fc_protocol = RTPROT_BOOT;
3062
	rt->fib6_protocol = cfg->fc_protocol;
3063 3064

	addr_type = ipv6_addr_type(&cfg->fc_dst);
L
Linus Torvalds 已提交
3065

3066 3067 3068
	if (cfg->fc_encap) {
		struct lwtunnel_state *lwtstate;

3069
		err = lwtunnel_build_state(cfg->fc_encap_type,
3070
					   cfg->fc_encap, AF_INET6, cfg,
3071
					   &lwtstate, extack);
3072 3073
		if (err)
			goto out;
3074
		rt->fib6_nh.nh_lwtstate = lwtstate_get(lwtstate);
3075 3076
	}

3077 3078 3079
	ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
	rt->fib6_dst.plen = cfg->fc_dst_len;
	if (rt->fib6_dst.plen == 128)
3080
		rt->dst_host = true;
3081

L
Linus Torvalds 已提交
3082
#ifdef CONFIG_IPV6_SUBTREES
3083 3084
	ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
	rt->fib6_src.plen = cfg->fc_src_len;
L
Linus Torvalds 已提交
3085 3086
#endif

3087
	rt->fib6_metric = cfg->fc_metric;
3088
	rt->fib6_nh.nh_weight = 1;
L
Linus Torvalds 已提交
3089

3090
	rt->fib6_type = cfg->fc_type;
L
Linus Torvalds 已提交
3091 3092 3093 3094

	/* We cannot add true routes via loopback here,
	   they would result in kernel looping; promote them to reject routes
	 */
3095
	if ((cfg->fc_flags & RTF_REJECT) ||
3096 3097 3098
	    (dev && (dev->flags & IFF_LOOPBACK) &&
	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
	     !(cfg->fc_flags & RTF_LOCAL))) {
L
Linus Torvalds 已提交
3099
		/* hold loopback dev/idev if we haven't done so. */
3100
		if (dev != net->loopback_dev) {
L
Linus Torvalds 已提交
3101 3102 3103 3104
			if (dev) {
				dev_put(dev);
				in6_dev_put(idev);
			}
3105
			dev = net->loopback_dev;
L
Linus Torvalds 已提交
3106 3107 3108 3109 3110 3111 3112
			dev_hold(dev);
			idev = in6_dev_get(dev);
			if (!idev) {
				err = -ENODEV;
				goto out;
			}
		}
3113
		rt->fib6_flags = RTF_REJECT|RTF_NONEXTHOP;
L
Linus Torvalds 已提交
3114 3115 3116
		goto install_route;
	}

3117
	if (cfg->fc_flags & RTF_GATEWAY) {
3118 3119
		err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
		if (err)
3120
			goto out;
L
Linus Torvalds 已提交
3121

3122
		rt->fib6_nh.nh_gw = cfg->fc_gateway;
L
Linus Torvalds 已提交
3123 3124 3125
	}

	err = -ENODEV;
3126
	if (!dev)
L
Linus Torvalds 已提交
3127 3128
		goto out;

3129 3130 3131 3132 3133 3134
	if (idev->cnf.disable_ipv6) {
		NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
		err = -EACCES;
		goto out;
	}

3135 3136 3137 3138 3139 3140
	if (!(dev->flags & IFF_UP)) {
		NL_SET_ERR_MSG(extack, "Nexthop device is not up");
		err = -ENETDOWN;
		goto out;
	}

3141 3142
	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
3143
			NL_SET_ERR_MSG(extack, "Invalid source address");
3144 3145 3146
			err = -EINVAL;
			goto out;
		}
3147 3148
		rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
		rt->fib6_prefsrc.plen = 128;
3149
	} else
3150
		rt->fib6_prefsrc.plen = 0;
3151

3152
	rt->fib6_flags = cfg->fc_flags;
L
Linus Torvalds 已提交
3153 3154

install_route:
3155
	if (!(rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
3156
	    !netif_carrier_ok(dev))
3157 3158
		rt->fib6_nh.nh_flags |= RTNH_F_LINKDOWN;
	rt->fib6_nh.nh_flags |= (cfg->fc_flags & RTNH_F_ONLINK);
3159
	rt->fib6_nh.nh_dev = dev;
3160
	rt->fib6_table = table;
3161

3162
	cfg->fc_nlinfo.nl_net = dev_net(dev);
3163

D
David Ahern 已提交
3164 3165 3166
	if (idev)
		in6_dev_put(idev);

3167
	return rt;
3168 3169 3170 3171 3172 3173
out:
	if (dev)
		dev_put(dev);
	if (idev)
		in6_dev_put(idev);

3174
	fib6_info_release(rt);
3175
	return ERR_PTR(err);
3176 3177
}

3178
/* Create a fib6_info from @cfg and insert it into the FIB.
 *
 * The insert path keeps its own reference on success, so the reference
 * returned by ip6_route_info_create() is always dropped here before
 * returning the insertion result to the caller.
 */
int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
		  struct netlink_ext_ack *extack)
{
	struct fib6_info *f6i;
	int err;

	f6i = ip6_route_info_create(cfg, gfp_flags, extack);
	if (IS_ERR(f6i))
		return PTR_ERR(f6i);

	err = __ip6_ins_rt(f6i, &cfg->fc_nlinfo, extack);
	fib6_info_release(f6i);

	return err;
}

3194
/* Delete a single fib6_info from its table under the table lock.
 * Consumes the caller's reference on @rt (released on all paths).
 * Returns -ENOENT for the null entry, otherwise the fib6_del() result.
 */
static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
{
	struct net *net = info->nl_net;
	struct fib6_table *table;
	int err;

	/* the null entry is a shared sentinel and must never be deleted */
	if (rt == net->ipv6.fib6_null_entry) {
		err = -ENOENT;
		goto out;
	}

	table = rt->fib6_table;
	spin_lock_bh(&table->tb6_lock);
	err = fib6_del(rt, info);
	spin_unlock_bh(&table->tb6_lock);

out:
	fib6_info_release(rt);
	return err;
}

3215
/* Public wrapper around __ip6_del_rt() with default netlink info
 * (no originating nlmsg; notifications go to @net).
 */
int ip6_del_rt(struct net *net, struct fib6_info *rt)
{
	struct nl_info info = { .nl_net = net };

	return __ip6_del_rt(rt, &info);
}

3222
/* Delete a multipath route: @rt and, when fc_delete_all_nh is set, all
 * of its siblings, in one pass under the table lock.  A single netlink
 * notification covering every hop is built up-front (best effort); if
 * that succeeds, per-hop notifications are suppressed via skip_notify.
 * Consumes the caller's reference on @rt.
 */
static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
{
	struct nl_info *info = &cfg->fc_nlinfo;
	struct net *net = info->nl_net;
	struct sk_buff *skb = NULL;
	struct fib6_table *table;
	int err = -ENOENT;

	if (rt == net->ipv6.fib6_null_entry)
		goto out_put;
	table = rt->fib6_table;
	spin_lock_bh(&table->tb6_lock);

	if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
		struct fib6_info *sibling, *next_sibling;

		/* prefer to send a single notification with all hops */
		skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
		if (skb) {
			u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;

			if (rt6_fill_node(net, skb, rt, NULL,
					  NULL, NULL, 0, RTM_DELROUTE,
					  info->portid, seq, 0) < 0) {
				kfree_skb(skb);
				skb = NULL;
			} else
				info->skip_notify = 1;
		}

		/* _safe variant: fib6_del() unlinks each sibling */
		list_for_each_entry_safe(sibling, next_sibling,
					 &rt->fib6_siblings,
					 fib6_siblings) {
			err = fib6_del(sibling, info);
			if (err)
				goto out_unlock;
		}
	}

	err = fib6_del(rt, info);
out_unlock:
	spin_unlock_bh(&table->tb6_lock);
out_put:
	fib6_info_release(rt);

	/* send the combined notification outside the table lock */
	if (skb) {
		rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
			    info->nlh, gfp_any());
	}
	return err;
}

3274 3275 3276 3277 3278 3279 3280 3281 3282 3283
/* Remove one cached (exception) route if it matches the ifindex and
 * gateway constraints in @cfg.  Returns -ESRCH when the entry does not
 * match, otherwise the result of rt6_remove_exception_rt().
 */
static int ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
{
	if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
		return -ESRCH;

	if (cfg->fc_flags & RTF_GATEWAY &&
	    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
		return -ESRCH;

	return rt6_remove_exception_rt(rt);
}

3290 3291
static int ip6_route_del(struct fib6_config *cfg,
			 struct netlink_ext_ack *extack)
L
Linus Torvalds 已提交
3292
{
3293
	struct rt6_info *rt_cache;
T
Thomas Graf 已提交
3294
	struct fib6_table *table;
3295
	struct fib6_info *rt;
L
Linus Torvalds 已提交
3296 3297 3298
	struct fib6_node *fn;
	int err = -ESRCH;

3299
	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
3300 3301
	if (!table) {
		NL_SET_ERR_MSG(extack, "FIB table does not exist");
T
Thomas Graf 已提交
3302
		return err;
3303
	}
T
Thomas Graf 已提交
3304

3305
	rcu_read_lock();
L
Linus Torvalds 已提交
3306

T
Thomas Graf 已提交
3307
	fn = fib6_locate(&table->tb6_root,
3308
			 &cfg->fc_dst, cfg->fc_dst_len,
3309
			 &cfg->fc_src, cfg->fc_src_len,
3310
			 !(cfg->fc_flags & RTF_CACHE));
3311

L
Linus Torvalds 已提交
3312
	if (fn) {
3313
		for_each_fib6_node_rt_rcu(fn) {
3314
			if (cfg->fc_flags & RTF_CACHE) {
3315 3316
				int rc;

3317 3318
				rt_cache = rt6_find_cached_rt(rt, &cfg->fc_dst,
							      &cfg->fc_src);
3319 3320
				if (rt_cache) {
					rc = ip6_del_cached_rt(rt_cache, cfg);
3321 3322
					if (rc != -ESRCH) {
						rcu_read_unlock();
3323
						return rc;
3324
					}
3325 3326
				}
				continue;
3327
			}
3328
			if (cfg->fc_ifindex &&
3329 3330
			    (!rt->fib6_nh.nh_dev ||
			     rt->fib6_nh.nh_dev->ifindex != cfg->fc_ifindex))
L
Linus Torvalds 已提交
3331
				continue;
3332
			if (cfg->fc_flags & RTF_GATEWAY &&
3333
			    !ipv6_addr_equal(&cfg->fc_gateway, &rt->fib6_nh.nh_gw))
L
Linus Torvalds 已提交
3334
				continue;
3335
			if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
L
Linus Torvalds 已提交
3336
				continue;
3337
			if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol)
3338
				continue;
3339 3340
			if (!fib6_info_hold_safe(rt))
				continue;
3341
			rcu_read_unlock();
L
Linus Torvalds 已提交
3342

3343 3344 3345 3346 3347
			/* if gateway was specified only delete the one hop */
			if (cfg->fc_flags & RTF_GATEWAY)
				return __ip6_del_rt(rt, &cfg->fc_nlinfo);

			return __ip6_del_rt_siblings(rt, cfg);
L
Linus Torvalds 已提交
3348 3349
		}
	}
3350
	rcu_read_unlock();
L
Linus Torvalds 已提交
3351 3352 3353 3354

	return err;
}

3355
/* Process an ICMPv6 Redirect (RFC 4861 sec 8): validate the message,
 * update the neighbour cache for the new first hop, and install a
 * RTF_CACHE exception route towards the redirected destination.
 */
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct netevent_redirect netevent;
	struct rt6_info *rt, *nrt = NULL;
	struct ndisc_options ndopts;
	struct inet6_dev *in6_dev;
	struct neighbour *neigh;
	struct fib6_info *from;
	struct rd_msg *msg;
	int optlen, on_link;
	u8 *lladdr;

	optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
	optlen -= sizeof(*msg);

	if (optlen < 0) {
		net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
		return;
	}

	msg = (struct rd_msg *)icmp6_hdr(skb);

	if (ipv6_addr_is_multicast(&msg->dest)) {
		net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
		return;
	}

	/* dest == target means the destination itself is on-link;
	 * otherwise the target must be a link-local router address
	 */
	on_link = 0;
	if (ipv6_addr_equal(&msg->dest, &msg->target)) {
		on_link = 1;
	} else if (ipv6_addr_type(&msg->target) !=
		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
		net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
		return;
	}

	in6_dev = __in6_dev_get(skb->dev);
	if (!in6_dev)
		return;
	/* routers and redirect-disabled interfaces ignore redirects */
	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
		return;

	/* RFC2461 8.1:
	 *	The IP source address of the Redirect MUST be the same as the current
	 *	first-hop router for the specified ICMP Destination Address.
	 */

	if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
		net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
		return;
	}

	lladdr = NULL;
	if (ndopts.nd_opts_tgt_lladdr) {
		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
					     skb->dev);
		if (!lladdr) {
			net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
			return;
		}
	}

	rt = (struct rt6_info *) dst;
	if (rt->rt6i_flags & RTF_REJECT) {
		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
		return;
	}

	/* Redirect received -> path was valid.
	 * Look, redirects are sent only in response to data packets,
	 * so that this nexthop apparently is reachable. --ANK
	 */
	dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);

	neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
	if (!neigh)
		return;

	/*
	 *	We have finally decided to accept it.
	 */

	ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
		     NEIGH_UPDATE_F_OVERRIDE|
		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
				     NEIGH_UPDATE_F_ISROUTER)),
		     NDISC_REDIRECT, &ndopts);

	rcu_read_lock();
	from = rcu_dereference(rt->from);
	/* This fib6_info_hold() is safe here because we hold reference to rt
	 * and rt already holds reference to fib6_info.
	 */
	fib6_info_hold(from);
	rcu_read_unlock();

	nrt = ip6_rt_cache_alloc(from, &msg->dest, NULL);
	if (!nrt)
		goto out;

	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
	if (on_link)
		nrt->rt6i_flags &= ~RTF_GATEWAY;

	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;

	/* No need to remove rt from the exception table if rt is
	 * a cached route because rt6_insert_exception() will
	 * takes care of it
	 */
	if (rt6_insert_exception(nrt, from)) {
		dst_release_immediate(&nrt->dst);
		goto out;
	}

	netevent.old = &rt->dst;
	netevent.new = &nrt->dst;
	netevent.daddr = &msg->dest;
	netevent.neigh = neigh;
	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);

out:
	fib6_info_release(from);
	neigh_release(neigh);
}

3482
#ifdef CONFIG_IPV6_ROUTE_INFO
/* Look up an RA route-information entry (RTF_ROUTEINFO|RTF_GATEWAY)
 * for @prefix/@prefixlen via @gwaddr on @dev.  Returns the entry with
 * a reference held, or NULL.
 */
static struct fib6_info *rt6_get_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev)
{
	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
	int ifindex = dev->ifindex;
	struct fib6_node *fn;
	struct fib6_info *rt = NULL;
	struct fib6_table *table;

	table = fib6_get_table(net, tb_id);
	if (!table)
		return NULL;

	rcu_read_lock();
	fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
	if (!fn)
		goto out;

	for_each_fib6_node_rt_rcu(fn) {
		if (rt->fib6_nh.nh_dev->ifindex != ifindex)
			continue;
		if ((rt->fib6_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
			continue;
		if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr))
			continue;
		/* skip entries whose refcount already dropped to zero */
		if (!fib6_info_hold_safe(rt))
			continue;
		break;
	}
out:
	rcu_read_unlock();
	return rt;
}

3519
static struct fib6_info *rt6_add_route_info(struct net *net,
3520
					   const struct in6_addr *prefix, int prefixlen,
3521 3522
					   const struct in6_addr *gwaddr,
					   struct net_device *dev,
3523
					   unsigned int pref)
3524
{
3525
	struct fib6_config cfg = {
3526
		.fc_metric	= IP6_RT_PRIO_USER,
3527
		.fc_ifindex	= dev->ifindex,
3528 3529 3530
		.fc_dst_len	= prefixlen,
		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
				  RTF_UP | RTF_PREF(pref),
3531
		.fc_protocol = RTPROT_RA,
3532
		.fc_type = RTN_UNICAST,
3533
		.fc_nlinfo.portid = 0,
3534 3535
		.fc_nlinfo.nlh = NULL,
		.fc_nlinfo.nl_net = net,
3536 3537
	};

3538
	cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
A
Alexey Dobriyan 已提交
3539 3540
	cfg.fc_dst = *prefix;
	cfg.fc_gateway = *gwaddr;
3541

3542 3543
	/* We should treat it as a default route if prefix length is 0. */
	if (!prefixlen)
3544
		cfg.fc_flags |= RTF_DEFAULT;
3545

3546
	ip6_route_add(&cfg, GFP_ATOMIC, NULL);
3547

3548
	return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
3549 3550 3551
}
#endif

3552
/* Find the RA-learned default route (RTF_ADDRCONF|RTF_DEFAULT) via
 * gateway @addr on @dev.  Returns the entry with a reference held, or
 * NULL if absent or already dying.
 */
struct fib6_info *rt6_get_dflt_router(struct net *net,
				     const struct in6_addr *addr,
				     struct net_device *dev)
{
	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
	struct fib6_info *rt;
	struct fib6_table *table;

	table = fib6_get_table(net, tb_id);
	if (!table)
		return NULL;

	rcu_read_lock();
	for_each_fib6_node_rt_rcu(&table->tb6_root) {
		if (dev == rt->fib6_nh.nh_dev &&
		    ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
		    ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr))
			break;
	}
	/* found entry may be going away; only return it if we got a ref */
	if (rt && !fib6_info_hold_safe(rt))
		rt = NULL;
	rcu_read_unlock();
	return rt;
}

3577
/* Install an RA-learned default route via @gwaddr on @dev and mark the
 * owning table as holding a default router.  Returns the new entry
 * (referenced) via rt6_get_dflt_router(), or NULL on failure.
 */
struct fib6_info *rt6_add_dflt_router(struct net *net,
				     const struct in6_addr *gwaddr,
				     struct net_device *dev,
				     unsigned int pref)
{
	struct fib6_config cfg = {
		.fc_table	= l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
		.fc_metric	= IP6_RT_PRIO_USER,
		.fc_ifindex	= dev->ifindex,
		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
				  RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
		.fc_protocol = RTPROT_RA,
		.fc_type = RTN_UNICAST,
		.fc_nlinfo.portid = 0,
		.fc_nlinfo.nlh = NULL,
		.fc_nlinfo.nl_net = net,
	};

	cfg.fc_gateway = *gwaddr;

	if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
		struct fib6_table *table;

		/* flag the table so rt6_purge_dflt_routers() can skip
		 * tables that never held a default router
		 */
		table = fib6_get_table(dev_net(dev), cfg.fc_table);
		if (table)
			table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
	}

	return rt6_get_dflt_router(net, gwaddr, dev);
}

3608 3609
/* Remove all RA-learned routes (RTF_DEFAULT or RTF_ADDRCONF) from
 * @table, except on interfaces with accept_ra == 2 (always accept).
 * The walk restarts after each deletion since ip6_del_rt() drops RCU.
 */
static void __rt6_purge_dflt_routers(struct net *net,
				     struct fib6_table *table)
{
	struct fib6_info *rt;

restart:
	rcu_read_lock();
	for_each_fib6_node_rt_rcu(&table->tb6_root) {
		struct net_device *dev = fib6_info_nh_dev(rt);
		struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;

		if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
		    (!idev || idev->cnf.accept_ra != 2) &&
		    fib6_info_hold_safe(rt)) {
			rcu_read_unlock();
			ip6_del_rt(net, rt);
			goto restart;
		}
	}
	rcu_read_unlock();

	table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
}

/* Purge RA-learned default routers from every FIB table that has the
 * RT6_TABLE_HAS_DFLT_ROUTER flag set.
 */
void rt6_purge_dflt_routers(struct net *net)
{
	struct fib6_table *table;
	struct hlist_head *head;
	unsigned int h;

	rcu_read_lock();

	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
		head = &net->ipv6.fib_table_hash[h];
		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
			if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
				__rt6_purge_dflt_routers(net, table);
		}
	}

	rcu_read_unlock();
}

3651 3652
/* Translate the legacy ioctl route request (struct in6_rtmsg) into the
 * internal fib6_config representation used by the add/del paths.
 */
static void rtmsg_to_fib6_config(struct net *net,
				 struct in6_rtmsg *rtmsg,
				 struct fib6_config *cfg)
{
	memset(cfg, 0, sizeof(*cfg));

	/* honour VRF/l3mdev table binding of the egress interface */
	cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
			 : RT6_TABLE_MAIN;
	cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
	cfg->fc_metric = rtmsg->rtmsg_metric;
	cfg->fc_expires = rtmsg->rtmsg_info;
	cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
	cfg->fc_src_len = rtmsg->rtmsg_src_len;
	cfg->fc_flags = rtmsg->rtmsg_flags;
	cfg->fc_type = rtmsg->rtmsg_type;

	cfg->fc_nlinfo.nl_net = net;

	cfg->fc_dst = rtmsg->rtmsg_dst;
	cfg->fc_src = rtmsg->rtmsg_src;
	cfg->fc_gateway = rtmsg->rtmsg_gateway;
}

3674
/* Legacy SIOCADDRT/SIOCDELRT ioctl entry point.  Requires
 * CAP_NET_ADMIN in the owning user namespace; runs under rtnl_lock.
 */
int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct fib6_config cfg;
	struct in6_rtmsg rtmsg;
	int err;

	switch (cmd) {
	case SIOCADDRT:		/* Add a route */
	case SIOCDELRT:		/* Delete a route */
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		err = copy_from_user(&rtmsg, arg,
				     sizeof(struct in6_rtmsg));
		if (err)
			return -EFAULT;

		rtmsg_to_fib6_config(net, &rtmsg, &cfg);

		rtnl_lock();
		switch (cmd) {
		case SIOCADDRT:
			err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
			break;
		case SIOCDELRT:
			err = ip6_route_del(&cfg, NULL);
			break;
		default:
			err = -EINVAL;
		}
		rtnl_unlock();

		return err;
	}

	return -EINVAL;
}

/*
 *	Drop the packet on the floor
 */

3715
/* Drop @skb, bump the relevant SNMP "no route" counter, and send an
 * ICMPv6 destination-unreachable with the given @code.
 */
static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
{
	int type;
	struct dst_entry *dst = skb_dst(skb);
	switch (ipstats_mib_noroutes) {
	case IPSTATS_MIB_INNOROUTES:
		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
		/* unspecified destination is an address error, not "no route" */
		if (type == IPV6_ADDR_ANY) {
			IP6_INC_STATS(dev_net(dst->dev),
				      __in6_dev_get_safely(skb->dev),
				      IPSTATS_MIB_INADDRERRORS);
			break;
		}
		/* FALLTHROUGH */
	case IPSTATS_MIB_OUTNOROUTES:
		IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
			      ipstats_mib_noroutes);
		break;
	}
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
	kfree_skb(skb);
	return 0;
}

3739 3740
/* dst input handler for blackhole/unreachable routes (input path) */
static int ip6_pkt_discard(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
}

E
Eric W. Biederman 已提交
3744
/* dst output handler for blackhole/unreachable routes (output path) */
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
}

3750 3751
/* dst input handler for prohibit routes: administratively prohibited */
static int ip6_pkt_prohibit(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
}

E
Eric W. Biederman 已提交
3755
/* dst output handler for prohibit routes: administratively prohibited */
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
}

L
Linus Torvalds 已提交
3761 3762 3763 3764
/*
 *	Allocate a dst for local (unicast / anycast) address.
 */

3765 3766 3767 3768
/* Allocate a fib6_info for a local (unicast) or anycast address on
 * @idev, placed in the (l3mdev-aware) local table.  Returns the new
 * entry or ERR_PTR(-ENOMEM).  Takes a reference on the device.
 */
struct fib6_info *addrconf_f6i_alloc(struct net *net,
				     struct inet6_dev *idev,
				     const struct in6_addr *addr,
				     bool anycast, gfp_t gfp_flags)
{
	u32 tb_id;
	struct net_device *dev = idev->dev;
	struct fib6_info *f6i;

	f6i = fib6_info_alloc(gfp_flags);
	if (!f6i)
		return ERR_PTR(-ENOMEM);

	f6i->dst_nocount = true;	/* don't count against route cache gc */
	f6i->dst_host = true;		/* /128 host route */
	f6i->fib6_protocol = RTPROT_KERNEL;
	f6i->fib6_flags = RTF_UP | RTF_NONEXTHOP;
	if (anycast) {
		f6i->fib6_type = RTN_ANYCAST;
		f6i->fib6_flags |= RTF_ANYCAST;
	} else {
		f6i->fib6_type = RTN_LOCAL;
		f6i->fib6_flags |= RTF_LOCAL;
	}

	f6i->fib6_nh.nh_gw = *addr;
	dev_hold(dev);
	f6i->fib6_nh.nh_dev = dev;
	f6i->fib6_dst.addr = *addr;
	f6i->fib6_dst.plen = 128;
	tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
	f6i->fib6_table = fib6_get_table(net, tb_id);

	return f6i;
}

3801 3802 3803 3804 3805 3806 3807
/* remove deleted ip from prefsrc entries */
struct arg_dev_net_ip {
	struct net_device *dev;	/* device being cleaned, NULL for all */
	struct net *net;	/* owning netns */
	struct in6_addr *addr;	/* preferred-source address to remove */
};

3808
/* fib6_clean_all() callback: clear the preferred source address on any
 * route that references the deleted address, and scrub cached routes.
 * Always returns 0 (walk continues; the route itself is kept).
 */
static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
{
	struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
	struct net *net = ((struct arg_dev_net_ip *)arg)->net;
	struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;

	if (((void *)rt->fib6_nh.nh_dev == dev || !dev) &&
	    rt != net->ipv6.fib6_null_entry &&
	    ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
		spin_lock_bh(&rt6_exception_lock);
		/* remove prefsrc entry */
		rt->fib6_prefsrc.plen = 0;
		/* need to update cache as well */
		rt6_exceptions_remove_prefsrc(rt);
		spin_unlock_bh(&rt6_exception_lock);
	}
	return 0;
}

/* Strip @ifp's address from all routes that use it as preferred source
 * (called when the address is removed from its interface).
 */
void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
{
	struct net *net = dev_net(ifp->idev->dev);
	struct arg_dev_net_ip adni = {
		.dev = ifp->idev->dev,
		.net = net,
		.addr = &ifp->addr,
	};
	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
}

3838 3839 3840
#define RTF_RA_ROUTER		(RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)

/* Remove routers and update dst entries when gateway turn into host. */
static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
{
	struct in6_addr *gateway = (struct in6_addr *)arg;

	/* returning -1 tells the fib walker to delete this route */
	if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
	    ipv6_addr_equal(gateway, &rt->fib6_nh.nh_gw)) {
		return -1;
	}

	/* Further clean up cached routes in exception table.
	 * This is needed because cached route may have a different
	 * gateway than its 'parent' in the case of an ip redirect.
	 */
	rt6_exceptions_clean_tohost(rt, gateway);

	return 0;
}

/* Drop router routes through @gateway once it is known to be a host */
void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
{
	fib6_clean_all(net, fib6_clean_tohost, gateway);
}

3864 3865
/* Argument for the fib6_ifup/fib6_ifdown walkers: the affected device
 * plus either the nexthop flags to clear (up) or the netdev event (down).
 */
struct arg_netdev_event {
	const struct net_device *dev;
	union {
		unsigned int nh_flags;
		unsigned long event;
	};
};

3872
/* Find the first ECMP-capable route in @rt's fib node with the same
 * metric — i.e. the head of @rt's multipath group.  Must be called
 * with the table lock held (enforced via lockdep_is_held).
 */
static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
{
	struct fib6_info *iter;
	struct fib6_node *fn;

	fn = rcu_dereference_protected(rt->fib6_node,
			lockdep_is_held(&rt->fib6_table->tb6_lock));
	iter = rcu_dereference_protected(fn->leaf,
			lockdep_is_held(&rt->fib6_table->tb6_lock));
	while (iter) {
		if (iter->fib6_metric == rt->fib6_metric &&
		    rt6_qualify_for_ecmp(iter))
			return iter;
		iter = rcu_dereference_protected(iter->fib6_next,
				lockdep_is_held(&rt->fib6_table->tb6_lock));
	}

	return NULL;
}

3892
static bool rt6_is_dead(const struct fib6_info *rt)
3893
{
3894 3895
	if (rt->fib6_nh.nh_flags & RTNH_F_DEAD ||
	    (rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN &&
D
David Ahern 已提交
3896
	     fib6_ignore_linkdown(rt)))
3897 3898 3899 3900 3901
		return true;

	return false;
}

3902
/* Sum the weights of all live (non-dead) nexthops in @rt's multipath
 * group, including @rt itself.
 */
static int rt6_multipath_total_weight(const struct fib6_info *rt)
{
	struct fib6_info *iter;
	int total = 0;

	if (!rt6_is_dead(rt))
		total += rt->fib6_nh.nh_weight;

	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
		if (!rt6_is_dead(iter))
			total += iter->fib6_nh.nh_weight;
	}

	return total;
}

3918
/* Assign @rt's hash-threshold upper bound from the running cumulative
 * @weight out of @total (scaled to 31 bits); dead nexthops get -1 so
 * they are never selected.
 */
static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
{
	int upper_bound = -1;

	if (!rt6_is_dead(rt)) {
		*weight += rt->fib6_nh.nh_weight;
		upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
						    total) - 1;
	}
	atomic_set(&rt->fib6_nh.nh_upper_bound, upper_bound);
}

3930
/* Recompute the selection upper bounds for every nexthop in @rt's
 * multipath group, in sibling order, against the group total @total.
 */
static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
{
	struct fib6_info *iter;
	int weight = 0;

	rt6_upper_bound_set(rt, &weight, total);

	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
		rt6_upper_bound_set(iter, &weight, total);
}

3941
/* Recompute weighted nexthop selection bounds for @rt's multipath
 * group after a nexthop changed state.
 */
void rt6_multipath_rebalance(struct fib6_info *rt)
{
	struct fib6_info *first;
	int total;

	/* In case the entire multipath route was marked for flushing,
	 * then there is no need to rebalance upon the removal of every
	 * sibling route.
	 */
	if (!rt->fib6_nsiblings || rt->should_flush)
		return;

	/* During lookup routes are evaluated in order, so we need to
	 * make sure upper bounds are assigned from the first sibling
	 * onwards.
	 */
	first = rt6_multipath_first_sibling(rt);
	if (WARN_ON_ONCE(!first))
		return;

	total = rt6_multipath_total_weight(first);
	rt6_multipath_upper_bound_set(first, total);
}

3965
/* fib6_clean_all() callback for device-up events: clear the requested
 * nexthop flags on routes using the device, bump the tree sernum so
 * cached dsts are revalidated, and rebalance the multipath group.
 */
static int fib6_ifup(struct fib6_info *rt, void *p_arg)
{
	const struct arg_netdev_event *arg = p_arg;
	struct net *net = dev_net(arg->dev);

	if (rt != net->ipv6.fib6_null_entry && rt->fib6_nh.nh_dev == arg->dev) {
		rt->fib6_nh.nh_flags &= ~arg->nh_flags;
		fib6_update_sernum_upto_root(net, rt);
		rt6_multipath_rebalance(rt);
	}

	return 0;
}

/* Device came (partially) up: clear @nh_flags on its routes.  When
 * clearing DEAD on a device whose carrier is still down, LINKDOWN is
 * cleared too so the flags stay consistent.
 */
void rt6_sync_up(struct net_device *dev, unsigned int nh_flags)
{
	struct arg_netdev_event arg = {
		.dev = dev,
		{
			.nh_flags = nh_flags,
		},
	};

	if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
		arg.nh_flags |= RTNH_F_LINKDOWN;

	fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
}

3994
/* Does any nexthop in @rt's multipath group egress through @dev? */
static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
				   const struct net_device *dev)
{
	struct fib6_info *iter;

	if (rt->fib6_nh.nh_dev == dev)
		return true;
	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
		if (iter->fib6_nh.nh_dev == dev)
			return true;

	return false;
}

4008
/* Mark @rt and all of its siblings for deletion by the fib walker */
static void rt6_multipath_flush(struct fib6_info *rt)
{
	struct fib6_info *iter;

	rt->should_flush = 1;
	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
		iter->should_flush = 1;
}

4017
/* Count nexthops in @rt's multipath group that are on @down_dev or
 * already flagged dead.
 */
static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
					     const struct net_device *down_dev)
{
	struct fib6_info *iter;
	unsigned int dead = 0;

	if (rt->fib6_nh.nh_dev == down_dev ||
	    rt->fib6_nh.nh_flags & RTNH_F_DEAD)
		dead++;
	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
		if (iter->fib6_nh.nh_dev == down_dev ||
		    iter->fib6_nh.nh_flags & RTNH_F_DEAD)
			dead++;

	return dead;
}

4034
/* Set @nh_flags on every nexthop in @rt's multipath group that egresses
 * through @dev.
 */
static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
				       const struct net_device *dev,
				       unsigned int nh_flags)
{
	struct fib6_info *iter;

	if (rt->fib6_nh.nh_dev == dev)
		rt->fib6_nh.nh_flags |= nh_flags;
	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
		if (iter->fib6_nh.nh_dev == dev)
			iter->fib6_nh.nh_flags |= nh_flags;
}

4047
/* called with write lock held for table with rt */
/* fib6_clean_all() callback for device-down events.  Return values
 * are walker directives: 0 keep, -1 delete this route, -2 delete the
 * whole sibling group (multipath) — see the fib walker for semantics.
 */
static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
{
	const struct arg_netdev_event *arg = p_arg;
	const struct net_device *dev = arg->dev;
	struct net *net = dev_net(dev);

	if (rt == net->ipv6.fib6_null_entry)
		return 0;

	switch (arg->event) {
	case NETDEV_UNREGISTER:
		return rt->fib6_nh.nh_dev == dev ? -1 : 0;
	case NETDEV_DOWN:
		if (rt->should_flush)
			return -1;
		if (!rt->fib6_nsiblings)
			return rt->fib6_nh.nh_dev == dev ? -1 : 0;
		if (rt6_multipath_uses_dev(rt, dev)) {
			unsigned int count;

			count = rt6_multipath_dead_count(rt, dev);
			/* all nexthops dead: flush the whole group */
			if (rt->fib6_nsiblings + 1 == count) {
				rt6_multipath_flush(rt);
				return -1;
			}
			rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
						   RTNH_F_LINKDOWN);
			fib6_update_sernum(net, rt);
			rt6_multipath_rebalance(rt);
		}
		return -2;
	case NETDEV_CHANGE:
		/* carrier loss: mark linkdown, but local/anycast routes
		 * are kept usable
		 */
		if (rt->fib6_nh.nh_dev != dev ||
		    rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
			break;
		rt->fib6_nh.nh_flags |= RTNH_F_LINKDOWN;
		rt6_multipath_rebalance(rt);
		break;
	}

	return 0;
}

4091
/* Propagate a device-down/unregister/change @event to all FIB entries */
void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
{
	struct arg_netdev_event arg = {
		.dev = dev,
		{
			.event = event,
		},
	};

	fib6_clean_all(dev_net(dev), fib6_ifdown, &arg);
}

/* Tear down IPv6 routing state for @dev: sync the FIB, flush uncached
 * dsts referencing the device, and drop its neighbour entries.
 */
void rt6_disable_ip(struct net_device *dev, unsigned long event)
{
	rt6_sync_down_dev(dev, event);
	rt6_uncached_list_flush_dev(dev_net(dev), dev);
	neigh_ifdown(&nd_tbl, dev);
}

4110
/* Argument for the rt6_mtu_change_route() walker */
struct rt6_mtu_change_arg {
	struct net_device *dev;	/* device whose MTU changed */
	unsigned int mtu;	/* new MTU */
};

4115
/* fib6_clean_all() callback: propagate a device MTU change to routes on
 * that device and to their cached (exception) entries.
 */
static int rt6_mtu_change_route(struct fib6_info *rt, void *p_arg)
{
	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
	struct inet6_dev *idev;

	/* In IPv6 pmtu discovery is not optional,
	   so that RTAX_MTU lock cannot disable it.
	   We still use this lock to block changes
	   caused by addrconf/ndisc.
	*/

	idev = __in6_dev_get(arg->dev);
	if (!idev)
		return 0;

	/* For administrative MTU increase, there is no way to discover
	   IPv6 PMTU increase, so PMTU increase should be updated here.
	   Since RFC 1981 doesn't include administrative MTU increase
	   update PMTU increase is a MUST. (i.e. jumbo frame)
	 */
	if (rt->fib6_nh.nh_dev == arg->dev &&
	    !fib6_metric_locked(rt, RTAX_MTU)) {
		u32 mtu = rt->fib6_pmtu;

		/* Only adopt the new MTU when shrinking, or when growing
		 * from a value that simply mirrored the old device MTU.
		 */
		if (mtu >= arg->mtu ||
		    (mtu < arg->mtu && mtu == idev->cnf.mtu6))
			fib6_metric_set(rt, RTAX_MTU, arg->mtu);

		spin_lock_bh(&rt6_exception_lock);
		rt6_exceptions_update_pmtu(idev, rt, arg->mtu);
		spin_unlock_bh(&rt6_exception_lock);
	}
	return 0;
}

4150
/* Notify the IPv6 FIB that dev's MTU changed to mtu. */
void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
{
	struct rt6_mtu_change_arg arg = {
		.dev = dev,
		.mtu = mtu,
	};

	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
}

4160
/* Netlink attribute validation policy for IPv6 RTM_{NEW,DEL,GET}ROUTE. */
static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
	[RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
	[RTA_PREFSRC]		= { .len = sizeof(struct in6_addr) },
	[RTA_OIF]               = { .type = NLA_U32 },
	[RTA_IIF]		= { .type = NLA_U32 },
	[RTA_PRIORITY]          = { .type = NLA_U32 },
	[RTA_METRICS]           = { .type = NLA_NESTED },
	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
	[RTA_PREF]              = { .type = NLA_U8 },
	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[RTA_ENCAP]		= { .type = NLA_NESTED },
	[RTA_EXPIRES]		= { .type = NLA_U32 },
	[RTA_UID]		= { .type = NLA_U32 },
	[RTA_MARK]		= { .type = NLA_U32 },
	[RTA_TABLE]		= { .type = NLA_U32 },
	[RTA_IP_PROTO]		= { .type = NLA_U8 },
	[RTA_SPORT]		= { .type = NLA_U16 },
	[RTA_DPORT]		= { .type = NLA_U16 },
};

/* Translate an RTM_NEWROUTE/RTM_DELROUTE netlink request (rtmsg header
 * plus RTA_* attributes) into a struct fib6_config.  Returns 0 on
 * success, negative errno on malformed input.
 */
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct fib6_config *cfg,
			      struct netlink_ext_ack *extack)
{
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	unsigned int pref;
	int err;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
			  NULL);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	rtm = nlmsg_data(nlh);
	memset(cfg, 0, sizeof(*cfg));

	cfg->fc_table = rtm->rtm_table;
	cfg->fc_dst_len = rtm->rtm_dst_len;
	cfg->fc_src_len = rtm->rtm_src_len;
	cfg->fc_flags = RTF_UP;
	cfg->fc_protocol = rtm->rtm_protocol;
	cfg->fc_type = rtm->rtm_type;

	/* All reject-style route types share the RTF_REJECT flag. */
	if (rtm->rtm_type == RTN_UNREACHABLE ||
	    rtm->rtm_type == RTN_BLACKHOLE ||
	    rtm->rtm_type == RTN_PROHIBIT ||
	    rtm->rtm_type == RTN_THROW)
		cfg->fc_flags |= RTF_REJECT;

	if (rtm->rtm_type == RTN_LOCAL)
		cfg->fc_flags |= RTF_LOCAL;

	if (rtm->rtm_flags & RTM_F_CLONED)
		cfg->fc_flags |= RTF_CACHE;

	cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);

	cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
	cfg->fc_nlinfo.nlh = nlh;
	cfg->fc_nlinfo.nl_net = sock_net(skb->sk);

	if (tb[RTA_GATEWAY]) {
		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
		cfg->fc_flags |= RTF_GATEWAY;
	}
	if (tb[RTA_VIA]) {
		NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
		goto errout;
	}

	if (tb[RTA_DST]) {
		/* Address may be truncated to the prefix length. */
		int plen = (rtm->rtm_dst_len + 7) >> 3;

		if (nla_len(tb[RTA_DST]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
	}

	if (tb[RTA_SRC]) {
		int plen = (rtm->rtm_src_len + 7) >> 3;

		if (nla_len(tb[RTA_SRC]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
	}

	if (tb[RTA_PREFSRC])
		cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);

	if (tb[RTA_OIF])
		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_PRIORITY])
		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);

	if (tb[RTA_METRICS]) {
		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
	}

	/* RTA_TABLE overrides the 8-bit table id in the rtmsg header. */
	if (tb[RTA_TABLE])
		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);

	if (tb[RTA_MULTIPATH]) {
		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);

		err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
						     cfg->fc_mp_len, extack);
		if (err < 0)
			goto errout;
	}

	if (tb[RTA_PREF]) {
		/* Unknown preference values fall back to medium. */
		pref = nla_get_u8(tb[RTA_PREF]);
		if (pref != ICMPV6_ROUTER_PREF_LOW &&
		    pref != ICMPV6_ROUTER_PREF_HIGH)
			pref = ICMPV6_ROUTER_PREF_MEDIUM;
		cfg->fc_flags |= RTF_PREF(pref);
	}

	if (tb[RTA_ENCAP])
		cfg->fc_encap = tb[RTA_ENCAP];

	if (tb[RTA_ENCAP_TYPE]) {
		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);

		err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
		if (err < 0)
			goto errout;
	}

	if (tb[RTA_EXPIRES]) {
		unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);

		if (addrconf_finite_timeout(timeout)) {
			cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
			cfg->fc_flags |= RTF_EXPIRES;
		}
	}

	err = 0;
errout:
	return err;
}

4310
/* One pending nexthop parsed out of an RTA_MULTIPATH request. */
struct rt6_nh {
	struct fib6_info *fib6_info;	/* route built for this nexthop */
	struct fib6_config r_cfg;	/* per-nexthop config, kept for rollback */
	struct list_head next;
};

/* Log each nexthop of a partially-applied multipath replace so the admin
 * can audit what is actually installed.
 */
static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
{
	struct rt6_nh *nh;

	list_for_each_entry(nh, rt6_nh_list, next) {
		pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
		        &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
		        nh->r_cfg.fc_ifindex);
	}
}

4327 4328
/* Queue rt on rt6_nh_list unless an equivalent nexthop is already there.
 * Returns 0 on success, -EEXIST for a duplicate, -ENOMEM on allocation
 * failure.  The caller keeps ownership of rt's reference on error.
 */
static int ip6_route_info_append(struct net *net,
				 struct list_head *rt6_nh_list,
				 struct fib6_info *rt,
				 struct fib6_config *r_cfg)
{
	struct rt6_nh *nh;
	int err = -EEXIST;

	list_for_each_entry(nh, rt6_nh_list, next) {
		/* check if fib6_info already exists */
		if (rt6_duplicate_nexthop(nh->fib6_info, rt))
			return err;
	}

	nh = kzalloc(sizeof(*nh), GFP_KERNEL);
	if (!nh)
		return -ENOMEM;
	nh->fib6_info = rt;
	memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
	list_add_tail(&nh->next, rt6_nh_list);

	return 0;
}

4351 4352
/* Emit the single RTM_NEWROUTE notification covering the whole
 * (possibly multipath) route that was just installed.
 */
static void ip6_route_mpath_notify(struct fib6_info *rt,
				   struct fib6_info *rt_last,
				   struct nl_info *info,
				   __u16 nlflags)
{
	/* if this is an APPEND route, then rt points to the first route
	 * inserted and rt_last points to last route inserted. Userspace
	 * wants a consistent dump of the route which starts at the first
	 * nexthop. Since sibling routes are always added at the end of
	 * the list, find the first sibling of the last route appended
	 */
	if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
		rt = list_first_entry(&rt_last->fib6_siblings,
				      struct fib6_info,
				      fib6_siblings);
	}

	if (rt)
		inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
}

4372 4373
/* RTM_NEWROUTE with RTA_MULTIPATH: build a fib6_info per nexthop, insert
 * them one by one, and send a single consolidated notification.  On a
 * mid-stream failure the already-inserted nexthops are deleted again.
 */
static int ip6_route_multipath_add(struct fib6_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct fib6_info *rt_notif = NULL, *rt_last = NULL;
	struct nl_info *info = &cfg->fc_nlinfo;
	struct fib6_config r_cfg;
	struct rtnexthop *rtnh;
	struct fib6_info *rt;
	struct rt6_nh *err_nh;
	struct rt6_nh *nh, *nh_safe;
	__u16 nlflags;
	int remaining;
	int attrlen;
	int err = 1;
	int nhn = 0;
	int replace = (cfg->fc_nlinfo.nlh &&
		       (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
	LIST_HEAD(rt6_nh_list);

	nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
	if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
		nlflags |= NLM_F_APPEND;

	remaining = cfg->fc_mp_len;
	rtnh = (struct rtnexthop *)cfg->fc_mp;

	/* Parse a Multipath Entry and build a list (rt6_nh_list) of
	 * fib6_info structs per nexthop
	 */
	while (rtnh_ok(rtnh, remaining)) {
		/* Per-nexthop config starts as a copy of the route config,
		 * then takes the nexthop's own ifindex/gateway/encap.
		 */
		memcpy(&r_cfg, cfg, sizeof(*cfg));
		if (rtnh->rtnh_ifindex)
			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla) {
				r_cfg.fc_gateway = nla_get_in6_addr(nla);
				r_cfg.fc_flags |= RTF_GATEWAY;
			}
			r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
			nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
			if (nla)
				r_cfg.fc_encap_type = nla_get_u16(nla);
		}

		r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
		rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			goto cleanup;
		}
		if (!rt6_qualify_for_ecmp(rt)) {
			err = -EINVAL;
			NL_SET_ERR_MSG(extack,
				       "Device only routes can not be added for IPv6 using the multipath API.");
			fib6_info_release(rt);
			goto cleanup;
		}

		rt->fib6_nh.nh_weight = rtnh->rtnh_hops + 1;

		err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
					    rt, &r_cfg);
		if (err) {
			fib6_info_release(rt);
			goto cleanup;
		}

		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* for add and replace send one notification with all nexthops.
	 * Skip the notification in fib6_add_rt2node and send one with
	 * the full route when done
	 */
	info->skip_notify = 1;

	err_nh = NULL;
	list_for_each_entry(nh, &rt6_nh_list, next) {
		err = __ip6_ins_rt(nh->fib6_info, info, extack);
		fib6_info_release(nh->fib6_info);

		if (!err) {
			/* save reference to last route successfully inserted */
			rt_last = nh->fib6_info;

			/* save reference to first route for notification */
			if (!rt_notif)
				rt_notif = nh->fib6_info;
		}

		/* nh->fib6_info is used or freed at this point, reset to NULL*/
		nh->fib6_info = NULL;
		if (err) {
			if (replace && nhn)
				ip6_print_replace_route_err(&rt6_nh_list);
			err_nh = nh;
			goto add_errout;
		}

		/* Because each route is added like a single route we remove
		 * these flags after the first nexthop: if there is a collision,
		 * we have already failed to add the first nexthop:
		 * fib6_add_rt2node() has rejected it; when replacing, old
		 * nexthops have been replaced by first new, the rest should
		 * be added to it.
		 */
		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
						     NLM_F_REPLACE);
		nhn++;
	}

	/* success ... tell user about new route */
	ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
	goto cleanup;

add_errout:
	/* send notification for routes that were added so that
	 * the delete notifications sent by ip6_route_del are
	 * coherent
	 */
	if (rt_notif)
		ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);

	/* Delete routes that were already added */
	list_for_each_entry(nh, &rt6_nh_list, next) {
		if (err_nh == nh)
			break;
		ip6_route_del(&nh->r_cfg, extack);
	}

cleanup:
	list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
		if (nh->fib6_info)
			fib6_info_release(nh->fib6_info);
		list_del(&nh->next);
		kfree(nh);
	}

	return err;
}

4519 4520
/* RTM_DELROUTE with RTA_MULTIPATH: delete each listed nexthop as an
 * individual route.  Processing continues past failures; the last
 * failing deletion's error code is returned (0 if all succeeded).
 */
static int ip6_route_multipath_del(struct fib6_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct fib6_config r_cfg;
	struct rtnexthop *rtnh;
	int remaining;
	int attrlen;
	int err = 1, last_err = 0;

	remaining = cfg->fc_mp_len;
	rtnh = (struct rtnexthop *)cfg->fc_mp;

	/* Parse a Multipath Entry */
	while (rtnh_ok(rtnh, remaining)) {
		memcpy(&r_cfg, cfg, sizeof(*cfg));
		if (rtnh->rtnh_ifindex)
			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla) {
				nla_memcpy(&r_cfg.fc_gateway, nla, 16);
				r_cfg.fc_flags |= RTF_GATEWAY;
			}
		}
		err = ip6_route_del(&r_cfg, extack);
		if (err)
			last_err = err;

		rtnh = rtnh_next(rtnh, &remaining);
	}

	return last_err;
}

4557 4558
static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
L
Linus Torvalds 已提交
4559
{
4560 4561
	struct fib6_config cfg;
	int err;
L
Linus Torvalds 已提交
4562

4563
	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
4564 4565 4566
	if (err < 0)
		return err;

4567
	if (cfg.fc_mp)
4568
		return ip6_route_multipath_del(&cfg, extack);
4569 4570
	else {
		cfg.fc_delete_all_nh = 1;
4571
		return ip6_route_del(&cfg, extack);
4572
	}
L
Linus Torvalds 已提交
4573 4574
}

4575 4576
static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
L
Linus Torvalds 已提交
4577
{
4578 4579
	struct fib6_config cfg;
	int err;
L
Linus Torvalds 已提交
4580

4581
	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
4582 4583 4584
	if (err < 0)
		return err;

4585
	if (cfg.fc_mp)
4586
		return ip6_route_multipath_add(&cfg, extack);
4587
	else
4588
		return ip6_route_add(&cfg, GFP_KERNEL, extack);
L
Linus Torvalds 已提交
4589 4590
}

4591
/* Worst-case netlink message size needed to notify about rt; used to
 * size the skb in inet6_rt_notify().
 */
static size_t rt6_nlmsg_size(struct fib6_info *rt)
{
	int nexthop_len = 0;

	if (rt->fib6_nsiblings) {
		/* Per-sibling cost inside the RTA_MULTIPATH nest. */
		nexthop_len = nla_total_size(0)	 /* RTA_MULTIPATH */
			    + NLA_ALIGN(sizeof(struct rtnexthop))
			    + nla_total_size(16) /* RTA_GATEWAY */
			    + lwtunnel_get_encap_size(rt->fib6_nh.nh_lwtstate);

		nexthop_len *= rt->fib6_nsiblings;
	}

	return NLMSG_ALIGN(sizeof(struct rtmsg))
	       + nla_total_size(16) /* RTA_SRC */
	       + nla_total_size(16) /* RTA_DST */
	       + nla_total_size(16) /* RTA_GATEWAY */
	       + nla_total_size(16) /* RTA_PREFSRC */
	       + nla_total_size(4) /* RTA_TABLE */
	       + nla_total_size(4) /* RTA_IIF */
	       + nla_total_size(4) /* RTA_OIF */
	       + nla_total_size(4) /* RTA_PRIORITY */
	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
	       + nla_total_size(sizeof(struct rta_cacheinfo))
	       + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
	       + nla_total_size(1) /* RTA_PREF */
	       + lwtunnel_get_encap_size(rt->fib6_nh.nh_lwtstate)
	       + nexthop_len;
}

4621
/* Fill nexthop attributes (gateway, oif, encap) for rt into skb and
 * accumulate RTNH_F_* state into *flags.  skip_oif suppresses RTA_OIF
 * for the multipath encoding, which carries the ifindex in rtnexthop.
 * Returns 0 or -EMSGSIZE if the skb ran out of room.
 */
static int rt6_nexthop_info(struct sk_buff *skb, struct fib6_info *rt,
			    unsigned int *flags, bool skip_oif)
{
	if (rt->fib6_nh.nh_flags & RTNH_F_DEAD)
		*flags |= RTNH_F_DEAD;

	if (rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN) {
		*flags |= RTNH_F_LINKDOWN;

		/* Report linkdown nexthops as dead when the device config
		 * says to ignore routes on linkdown.
		 */
		rcu_read_lock();
		if (fib6_ignore_linkdown(rt))
			*flags |= RTNH_F_DEAD;
		rcu_read_unlock();
	}

	if (rt->fib6_flags & RTF_GATEWAY) {
		if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->fib6_nh.nh_gw) < 0)
			goto nla_put_failure;
	}

	*flags |= (rt->fib6_nh.nh_flags & RTNH_F_ONLINK);
	if (rt->fib6_nh.nh_flags & RTNH_F_OFFLOAD)
		*flags |= RTNH_F_OFFLOAD;

	/* not needed for multipath encoding b/c it has a rtnexthop struct */
	if (!skip_oif && rt->fib6_nh.nh_dev &&
	    nla_put_u32(skb, RTA_OIF, rt->fib6_nh.nh_dev->ifindex))
		goto nla_put_failure;

	if (rt->fib6_nh.nh_lwtstate &&
	    lwtunnel_fill_encap(skb, rt->fib6_nh.nh_lwtstate) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

4660
/* add multipath next hop */
/* Append one rtnexthop entry (header + nested attributes) for rt inside
 * an open RTA_MULTIPATH nest.  Returns 0 or -EMSGSIZE.
 */
static int rt6_add_nexthop(struct sk_buff *skb, struct fib6_info *rt)
{
	const struct net_device *dev = rt->fib6_nh.nh_dev;
	struct rtnexthop *rtnh;
	unsigned int flags = 0;

	rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
	if (!rtnh)
		goto nla_put_failure;

	/* Weight is stored 1-based internally, 0-based on the wire. */
	rtnh->rtnh_hops = rt->fib6_nh.nh_weight - 1;
	rtnh->rtnh_ifindex = dev ? dev->ifindex : 0;

	if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
		goto nla_put_failure;

	rtnh->rtnh_flags = flags;

	/* length of rtnetlink header + attributes */
	rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

4688
/* Build one rtnetlink route message for rt into skb.  dst is optional:
 * when set (route-get path) the message reports the resolved dst entry
 * (dest/src/expires/error come from it); when NULL the FIB entry itself
 * is dumped.  Returns 0, or -EMSGSIZE and cancels the partial message.
 */
static int rt6_fill_node(struct net *net, struct sk_buff *skb,
			 struct fib6_info *rt, struct dst_entry *dst,
			 struct in6_addr *dest, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags)
{
	struct rt6_info *rt6 = (struct rt6_info *)dst;
	struct rt6key *rt6_dst, *rt6_src;
	u32 *pmetrics, table, rt6_flags;
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	long expires = 0;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	/* Prefer the cloned dst's keys/flags when one was supplied. */
	if (rt6) {
		rt6_dst = &rt6->rt6i_dst;
		rt6_src = &rt6->rt6i_src;
		rt6_flags = rt6->rt6i_flags;
	} else {
		rt6_dst = &rt->fib6_dst;
		rt6_src = &rt->fib6_src;
		rt6_flags = rt->fib6_flags;
	}

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET6;
	rtm->rtm_dst_len = rt6_dst->plen;
	rtm->rtm_src_len = rt6_src->plen;
	rtm->rtm_tos = 0;
	if (rt->fib6_table)
		table = rt->fib6_table->tb6_id;
	else
		table = RT6_TABLE_UNSPEC;
	/* Table ids above 255 don't fit the legacy 8-bit header field. */
	rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, table))
		goto nla_put_failure;

	rtm->rtm_type = rt->fib6_type;
	rtm->rtm_flags = 0;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	rtm->rtm_protocol = rt->fib6_protocol;

	if (rt6_flags & RTF_CACHE)
		rtm->rtm_flags |= RTM_F_CLONED;

	/* An explicit dest (route-get) is reported as a /128. */
	if (dest) {
		if (nla_put_in6_addr(skb, RTA_DST, dest))
			goto nla_put_failure;
		rtm->rtm_dst_len = 128;
	} else if (rtm->rtm_dst_len)
		if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
			goto nla_put_failure;
#ifdef CONFIG_IPV6_SUBTREES
	if (src) {
		if (nla_put_in6_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
		rtm->rtm_src_len = 128;
	} else if (rtm->rtm_src_len &&
		   nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
		goto nla_put_failure;
#endif
	if (iif) {
#ifdef CONFIG_IPV6_MROUTE
		/* Multicast route-get is answered by the mroute code. */
		if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
			int err = ip6mr_get_route(net, skb, rtm, portid);

			if (err == 0)
				return 0;
			if (err < 0)
				goto nla_put_failure;
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, iif))
				goto nla_put_failure;
	} else if (dest) {
		struct in6_addr saddr_buf;
		if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
		    nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	if (rt->fib6_prefsrc.plen) {
		struct in6_addr saddr_buf;
		saddr_buf = rt->fib6_prefsrc.addr;
		if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
	if (rtnetlink_put_metrics(skb, pmetrics) < 0)
		goto nla_put_failure;

	if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
		goto nla_put_failure;

	/* For multipath routes, walk the siblings list and add
	 * each as a nexthop within RTA_MULTIPATH.
	 */
	if (rt6) {
		if (rt6_flags & RTF_GATEWAY &&
		    nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
			goto nla_put_failure;

		if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
			goto nla_put_failure;
	} else if (rt->fib6_nsiblings) {
		struct fib6_info *sibling, *next_sibling;
		struct nlattr *mp;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (!mp)
			goto nla_put_failure;

		if (rt6_add_nexthop(skb, rt) < 0)
			goto nla_put_failure;

		list_for_each_entry_safe(sibling, next_sibling,
					 &rt->fib6_siblings, fib6_siblings) {
			if (rt6_add_nexthop(skb, sibling) < 0)
				goto nla_put_failure;
		}

		nla_nest_end(skb, mp);
	} else {
		if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
			goto nla_put_failure;
	}

	if (rt6_flags & RTF_EXPIRES) {
		expires = dst ? dst->expires : rt->expires;
		expires -= jiffies;
	}

	if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
		goto nla_put_failure;

	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
		goto nla_put_failure;


	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

4839
/* fib6 dump callback: emit one RTM_NEWROUTE message for rt, honouring
 * the RTM_F_PREFIX filter from the dump request.
 */
int rt6_dump_route(struct fib6_info *rt, void *p_arg)
{
	struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
	struct net *net = arg->net;

	if (rt == net->ipv6.fib6_null_entry)
		return 0;

	if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
		struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);

		/* user wants prefix routes only */
		if (rtm->rtm_flags & RTM_F_PREFIX &&
		    !(rt->fib6_flags & RTF_PREFIX_RT)) {
			/* success since this is not a prefix route */
			return 1;
		}
	}

	return rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL, 0,
			     RTM_NEWROUTE, NETLINK_CB(arg->cb->skb).portid,
			     arg->cb->nlh->nlmsg_seq, NLM_F_MULTI);
}

4863 4864
/* RTM_GETROUTE handler: build a flow from the request attributes, do a
 * route lookup (input path when RTA_IIF is given, output path
 * otherwise) and unicast the result back to the requester.  With
 * RTM_F_FIB_MATCH set, the matching FIB entry is reported instead of
 * the resolved dst.
 */
static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX+1];
	int err, iif = 0, oif = 0;
	struct fib6_info *from;
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi6 fl6;
	bool fibmatch;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
			  extack);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	memset(&fl6, 0, sizeof(fl6));
	rtm = nlmsg_data(nlh);
	fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
	fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);

	if (tb[RTA_SRC]) {
		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
			goto errout;

		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
	}

	if (tb[RTA_DST]) {
		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
			goto errout;

		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
	}

	if (tb[RTA_IIF])
		iif = nla_get_u32(tb[RTA_IIF]);

	if (tb[RTA_OIF])
		oif = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_MARK])
		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);

	if (tb[RTA_UID])
		fl6.flowi6_uid = make_kuid(current_user_ns(),
					   nla_get_u32(tb[RTA_UID]));
	else
		fl6.flowi6_uid = iif ? INVALID_UID : current_uid();

	if (tb[RTA_SPORT])
		fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);

	if (tb[RTA_DPORT])
		fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);

	if (tb[RTA_IP_PROTO]) {
		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
						  &fl6.flowi6_proto, AF_INET6,
						  extack);
		if (err)
			goto errout;
	}

	if (iif) {
		/* Input-path lookup as if the packet arrived on iif. */
		struct net_device *dev;
		int flags = 0;

		rcu_read_lock();

		dev = dev_get_by_index_rcu(net, iif);
		if (!dev) {
			rcu_read_unlock();
			err = -ENODEV;
			goto errout;
		}

		fl6.flowi6_iif = iif;

		if (!ipv6_addr_any(&fl6.saddr))
			flags |= RT6_LOOKUP_F_HAS_SADDR;

		dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);

		rcu_read_unlock();
	} else {
		fl6.flowi6_oif = oif;

		dst = ip6_route_output(net, NULL, &fl6);
	}


	rt = container_of(dst, struct rt6_info, dst);
	if (rt->dst.error) {
		err = rt->dst.error;
		ip6_rt_put(rt);
		goto errout;
	}

	if (rt == net->ipv6.ip6_null_entry) {
		err = rt->dst.error;
		ip6_rt_put(rt);
		goto errout;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		ip6_rt_put(rt);
		err = -ENOBUFS;
		goto errout;
	}

	/* Hand the dst reference to the skb; freed with the skb. */
	skb_dst_set(skb, &rt->dst);

	/* rt->from is RCU-protected; keep the read section across fill. */
	rcu_read_lock();
	from = rcu_dereference(rt->from);

	if (fibmatch)
		err = rt6_fill_node(net, skb, from, NULL, NULL, NULL, iif,
				    RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, 0);
	else
		err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
				    &fl6.saddr, iif, RTM_NEWROUTE,
				    NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
				    0);
	rcu_read_unlock();

	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;
}

5005
/* Broadcast a route change (event = RTM_NEWROUTE/RTM_DELROUTE) for rt
 * to the RTNLGRP_IPV6_ROUTE multicast group.
 */
void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
		     unsigned int nlm_flags)
{
	struct sk_buff *skb;
	struct net *net = info->nl_net;
	u32 seq;
	int err;

	err = -ENOBUFS;
	seq = info->nlh ? info->nlh->nlmsg_seq : 0;

	/* rt6_nlmsg_size() is the worst-case size for this route. */
	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
	if (!skb)
		goto errout;

	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
			    event, info->portid, seq, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}

5036
/* netdev notifier: wire the per-netns special routes (null / prohibit /
 * blackhole entries) to the loopback device when it registers, and drop
 * those idev references when it unregisters.
 */
static int ip6_route_dev_notify(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	/* Only the loopback device anchors the special entries. */
	if (!(dev->flags & IFF_LOOPBACK))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
		net->ipv6.fib6_null_entry->fib6_nh.nh_dev = dev;
		net->ipv6.ip6_null_entry->dst.dev = dev;
		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
#endif
	 } else if (event == NETDEV_UNREGISTER &&
		    dev->reg_state != NETREG_UNREGISTERED) {
		/* NETDEV_UNREGISTER could be fired for multiple times by
		 * netdev_wait_allrefs(). Make sure we only call this once.
		 */
		in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
		in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
#endif
	}

	return NOTIFY_OK;
}

L
Linus Torvalds 已提交
5070 5071 5072 5073 5074 5075 5076
/*
 *	/proc
 */

#ifdef CONFIG_PROC_FS
/* /proc/net/rt6_stats: one line of space-separated hex FIB counters. */
static int rt6_stats_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = (struct net *)seq->private;
	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
		   net->ipv6.rt6_stats->fib_nodes,
		   net->ipv6.rt6_stats->fib_route_nodes,
		   atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
		   net->ipv6.rt6_stats->fib_rt_entries,
		   net->ipv6.rt6_stats->fib_rt_cache,
		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
		   net->ipv6.rt6_stats->fib_discarded_routes);

	return 0;
}
#endif	/* CONFIG_PROC_FS */

#ifdef CONFIG_SYSCTL

/* net.ipv6.route.flush sysctl handler: write-only; a write schedules a
 * garbage-collection pass, optionally delayed by the written value.
 */
static
int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
			      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net;
	int delay;
	if (!write)
		return -EINVAL;

	net = (struct net *)ctl->extra1;
	delay = net->ipv6.sysctl.flush_delay;
	proc_dointvec(ctl, write, buffer, lenp, ppos);
	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
	return 0;
}

5109
struct ctl_table ipv6_route_table_template[] = {
5110
	{
L
Linus Torvalds 已提交
5111
		.procname	=	"flush",
5112
		.data		=	&init_net.ipv6.sysctl.flush_delay,
L
Linus Torvalds 已提交
5113
		.maxlen		=	sizeof(int),
5114
		.mode		=	0200,
A
Alexey Dobriyan 已提交
5115
		.proc_handler	=	ipv6_sysctl_rtcache_flush
L
Linus Torvalds 已提交
5116 5117 5118
	},
	{
		.procname	=	"gc_thresh",
5119
		.data		=	&ip6_dst_ops_template.gc_thresh,
L
Linus Torvalds 已提交
5120 5121
		.maxlen		=	sizeof(int),
		.mode		=	0644,
A
Alexey Dobriyan 已提交
5122
		.proc_handler	=	proc_dointvec,
L
Linus Torvalds 已提交
5123 5124 5125
	},
	{
		.procname	=	"max_size",
5126
		.data		=	&init_net.ipv6.sysctl.ip6_rt_max_size,
L
Linus Torvalds 已提交
5127 5128
		.maxlen		=	sizeof(int),
		.mode		=	0644,
A
Alexey Dobriyan 已提交
5129
		.proc_handler	=	proc_dointvec,
L
Linus Torvalds 已提交
5130 5131 5132
	},
	{
		.procname	=	"gc_min_interval",
5133
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
L
Linus Torvalds 已提交
5134 5135
		.maxlen		=	sizeof(int),
		.mode		=	0644,
A
Alexey Dobriyan 已提交
5136
		.proc_handler	=	proc_dointvec_jiffies,
L
Linus Torvalds 已提交
5137 5138 5139
	},
	{
		.procname	=	"gc_timeout",
5140
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_timeout,
L
Linus Torvalds 已提交
5141 5142
		.maxlen		=	sizeof(int),
		.mode		=	0644,
A
Alexey Dobriyan 已提交
5143
		.proc_handler	=	proc_dointvec_jiffies,
L
Linus Torvalds 已提交
5144 5145 5146
	},
	{
		.procname	=	"gc_interval",
5147
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_interval,
L
Linus Torvalds 已提交
5148 5149
		.maxlen		=	sizeof(int),
		.mode		=	0644,
A
Alexey Dobriyan 已提交
5150
		.proc_handler	=	proc_dointvec_jiffies,
L
Linus Torvalds 已提交
5151 5152 5153
	},
	{
		.procname	=	"gc_elasticity",
5154
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
L
Linus Torvalds 已提交
5155 5156
		.maxlen		=	sizeof(int),
		.mode		=	0644,
5157
		.proc_handler	=	proc_dointvec,
L
Linus Torvalds 已提交
5158 5159 5160
	},
	{
		.procname	=	"mtu_expires",
5161
		.data		=	&init_net.ipv6.sysctl.ip6_rt_mtu_expires,
L
Linus Torvalds 已提交
5162 5163
		.maxlen		=	sizeof(int),
		.mode		=	0644,
A
Alexey Dobriyan 已提交
5164
		.proc_handler	=	proc_dointvec_jiffies,
L
Linus Torvalds 已提交
5165 5166 5167
	},
	{
		.procname	=	"min_adv_mss",
5168
		.data		=	&init_net.ipv6.sysctl.ip6_rt_min_advmss,
L
Linus Torvalds 已提交
5169 5170
		.maxlen		=	sizeof(int),
		.mode		=	0644,
5171
		.proc_handler	=	proc_dointvec,
L
Linus Torvalds 已提交
5172 5173 5174
	},
	{
		.procname	=	"gc_min_interval_ms",
5175
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
L
Linus Torvalds 已提交
5176 5177
		.maxlen		=	sizeof(int),
		.mode		=	0644,
A
Alexey Dobriyan 已提交
5178
		.proc_handler	=	proc_dointvec_ms_jiffies,
L
Linus Torvalds 已提交
5179
	},
5180
	{ }
L
Linus Torvalds 已提交
5181 5182
};

5183
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
5184 5185 5186 5187 5188 5189
{
	struct ctl_table *table;

	table = kmemdup(ipv6_route_table_template,
			sizeof(ipv6_route_table_template),
			GFP_KERNEL);
5190 5191 5192

	if (table) {
		table[0].data = &net->ipv6.sysctl.flush_delay;
5193
		table[0].extra1 = net;
5194
		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
5195 5196 5197 5198 5199 5200 5201
		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
5202
		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
5203 5204 5205 5206

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
5207 5208
	}

5209 5210
	return table;
}
#endif

5213
static int __net_init ip6_route_net_init(struct net *net)
5214
{
5215
	int ret = -ENOMEM;
5216

5217 5218
	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(net->ipv6.ip6_dst_ops));
5219

5220 5221 5222
	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
		goto out_ip6_dst_ops;

D
David Ahern 已提交
5223 5224 5225 5226 5227 5228
	net->ipv6.fib6_null_entry = kmemdup(&fib6_null_entry_template,
					    sizeof(*net->ipv6.fib6_null_entry),
					    GFP_KERNEL);
	if (!net->ipv6.fib6_null_entry)
		goto out_ip6_dst_entries;

5229 5230 5231 5232
	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
					   sizeof(*net->ipv6.ip6_null_entry),
					   GFP_KERNEL);
	if (!net->ipv6.ip6_null_entry)
D
David Ahern 已提交
5233
		goto out_fib6_null_entry;
5234
	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
5235 5236
	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);
5237 5238

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
5239
	net->ipv6.fib6_has_custom_rules = false;
5240 5241 5242
	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
					       sizeof(*net->ipv6.ip6_prohibit_entry),
					       GFP_KERNEL);
5243 5244
	if (!net->ipv6.ip6_prohibit_entry)
		goto out_ip6_null_entry;
5245
	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
5246 5247
	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
			 ip6_template_metrics, true);
5248 5249 5250 5251

	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
					       sizeof(*net->ipv6.ip6_blk_hole_entry),
					       GFP_KERNEL);
5252 5253
	if (!net->ipv6.ip6_blk_hole_entry)
		goto out_ip6_prohibit_entry;
5254
	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
5255 5256
	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
			 ip6_template_metrics, true);
5257 5258
#endif

5259 5260 5261 5262 5263 5264 5265 5266 5267
	net->ipv6.sysctl.flush_delay = 0;
	net->ipv6.sysctl.ip6_rt_max_size = 4096;
	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;

5268 5269
	net->ipv6.ip6_rt_gc_expire = 30*HZ;

5270 5271 5272
	ret = 0;
out:
	return ret;
5273

5274 5275 5276 5277 5278 5279
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
	kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
	kfree(net->ipv6.ip6_null_entry);
#endif
D
David Ahern 已提交
5280 5281
out_fib6_null_entry:
	kfree(net->ipv6.fib6_null_entry);
5282 5283
out_ip6_dst_entries:
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
5284 5285
out_ip6_dst_ops:
	goto out;
5286 5287
}

5288
static void __net_exit ip6_route_net_exit(struct net *net)
5289
{
D
David Ahern 已提交
5290
	kfree(net->ipv6.fib6_null_entry);
5291 5292 5293 5294 5295
	kfree(net->ipv6.ip6_null_entry);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	kfree(net->ipv6.ip6_prohibit_entry);
	kfree(net->ipv6.ip6_blk_hole_entry);
#endif
5296
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
5297 5298
}

5299 5300 5301
static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
5302 5303
	proc_create_net("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops,
			sizeof(struct ipv6_route_iter));
5304 5305
	proc_create_net_single("rt6_stats", 0444, net->proc_net,
			rt6_stats_seq_show, NULL);
5306 5307 5308 5309 5310 5311 5312
#endif
	return 0;
}

/* Late per-netns teardown: remove the /proc/net entries created above. */
static void __net_exit ip6_route_net_exit_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ipv6_route", net->proc_net);
	remove_proc_entry("rt6_stats", net->proc_net);
#endif
}

/* Early per-netns setup/teardown of the IPv6 routing core state. */
static struct pernet_operations ip6_route_net_ops = {
	.init = ip6_route_net_init,
	.exit = ip6_route_net_exit,
};

/* Allocate and install the per-netns inetpeer base for IPv6. */
static int __net_init ipv6_inetpeer_init(struct net *net)
{
	struct inet_peer_base *base = kmalloc(sizeof(*base), GFP_KERNEL);

	if (!base)
		return -ENOMEM;
	inet_peer_base_init(base);
	net->ipv6.peers = base;
	return 0;
}

/* Detach, invalidate and free the per-netns inetpeer base. */
static void __net_exit ipv6_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *base = net->ipv6.peers;

	net->ipv6.peers = NULL;
	inetpeer_invalidate_tree(base);
	kfree(base);
}

5343
static struct pernet_operations ipv6_inetpeer_ops = {
5344 5345 5346 5347
	.init	=	ipv6_inetpeer_init,
	.exit	=	ipv6_inetpeer_exit,
};

/* /proc hooks; registered after ip6_route_net_ops in ip6_route_init(). */
static struct pernet_operations ip6_route_net_late_ops = {
	.init = ip6_route_net_init_late,
	.exit = ip6_route_net_exit_late,
};

5353 5354
static struct notifier_block ip6_route_dev_notifier = {
	.notifier_call = ip6_route_dev_notify,
5355
	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
5356 5357
};

5358 5359 5360 5361 5362
void __init ip6_route_init_special_entries(void)
{
	/* Registering of the loopback is done before this portion of code,
	 * the loopback reference in rt6_info will not be taken, do it
	 * manually for init_net */
D
David Ahern 已提交
5363
	init_net.ipv6.fib6_null_entry->fib6_nh.nh_dev = init_net.loopback_dev;
5364 5365 5366 5367 5368 5369 5370 5371 5372 5373
	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
  #ifdef CONFIG_IPV6_MULTIPLE_TABLES
	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
  #endif
}

5374
int __init ip6_route_init(void)
L
Linus Torvalds 已提交
5375
{
5376
	int ret;
5377
	int cpu;
5378

5379 5380
	ret = -ENOMEM;
	ip6_dst_ops_template.kmem_cachep =
A
Alexey Dobriyan 已提交
5381
		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
5382
				  SLAB_HWCACHE_ALIGN, NULL);
5383
	if (!ip6_dst_ops_template.kmem_cachep)
5384
		goto out;
5385

5386
	ret = dst_entries_init(&ip6_dst_blackhole_ops);
5387
	if (ret)
5388 5389
		goto out_kmem_cache;

5390 5391
	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
	if (ret)
5392
		goto out_dst_entries;
5393

5394 5395 5396
	ret = register_pernet_subsys(&ip6_route_net_ops);
	if (ret)
		goto out_register_inetpeer;
5397

5398 5399
	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

5400
	ret = fib6_init();
5401
	if (ret)
5402
		goto out_register_subsys;
5403 5404 5405

	ret = xfrm6_init();
	if (ret)
5406
		goto out_fib6_init;
5407

5408 5409 5410
	ret = fib6_rules_init();
	if (ret)
		goto xfrm6_init;
5411

5412 5413 5414 5415
	ret = register_pernet_subsys(&ip6_route_net_late_ops);
	if (ret)
		goto fib6_rules_init;

5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429
	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
				   inet6_rtm_newroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
				   inet6_rtm_delroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
				   inet6_rtm_getroute, NULL,
				   RTNL_FLAG_DOIT_UNLOCKED);
	if (ret < 0)
5430
		goto out_register_late_subsys;
5431

5432
	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
5433
	if (ret)
5434
		goto out_register_late_subsys;
5435

5436 5437 5438 5439 5440 5441 5442
	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}

5443 5444 5445
out:
	return ret;

5446
out_register_late_subsys:
5447
	rtnl_unregister_all(PF_INET6);
5448
	unregister_pernet_subsys(&ip6_route_net_late_ops);
5449 5450 5451 5452
fib6_rules_init:
	fib6_rules_cleanup();
xfrm6_init:
	xfrm6_fini();
5453 5454
out_fib6_init:
	fib6_gc_cleanup();
5455 5456
out_register_subsys:
	unregister_pernet_subsys(&ip6_route_net_ops);
5457 5458
out_register_inetpeer:
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
5459 5460
out_dst_entries:
	dst_entries_destroy(&ip6_dst_blackhole_ops);
5461
out_kmem_cache:
5462
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
5463
	goto out;
L
Linus Torvalds 已提交
5464 5465 5466 5467
}

void ip6_route_cleanup(void)
{
5468
	unregister_netdevice_notifier(&ip6_route_dev_notifier);
5469
	unregister_pernet_subsys(&ip6_route_net_late_ops);
T
Thomas Graf 已提交
5470
	fib6_rules_cleanup();
L
Linus Torvalds 已提交
5471 5472
	xfrm6_fini();
	fib6_gc_cleanup();
5473
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
5474
	unregister_pernet_subsys(&ip6_route_net_ops);
5475
	dst_entries_destroy(&ip6_dst_blackhole_ops);
5476
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
L
Linus Torvalds 已提交
5477
}