/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	: 	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD;
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 * 		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 * 	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 * 	Ilia Sotnikov		:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#include <linux/kmemleak.h>
#endif
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>

#include "fib_lookup.h"

#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;

static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int	 ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		 ip_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
static void		ipv4_dst_destroy(struct dst_entry *dst);

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	WARN_ON(1);
	return NULL;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr);
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);

static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.mtu =			ipv4_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.redirect =		ip_do_redirect,
	.local_out =		__ip_local_out,
	.neigh_lookup =		ipv4_neigh_lookup,
	.confirm_neigh =	ipv4_confirm_neigh,
};

#define ECN_OR_COST(class)	TC_PRIO_##class
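/* Note: this table is normally consulted via rt_tos2priority() (in
 * include/net/route.h at the time of writing), which indexes it with
 * IPTOS_TOS(tos) >> 1, so each consecutive pair of entries covers one
 * TC_PRIO_* class and its ECN_OR_COST() twin.
 */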

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)

#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos)
		return NULL;
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cache_seq_ops);
}

static const struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};


static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;

}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{

}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   0, /* st->in_hit */
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,

		   0, /* st->out_hit */
		   st->out_slow_tot,
		   st->out_slow_mc,

		   0, /* st->gc_total */
		   0, /* st->gc_ignored */
		   0, /* st->gc_goal_miss */
		   0, /* st->gc_dst_overflow */
		   0, /* st->in_hlist_search */
		   0  /* st->out_hlist_search */
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};


static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}

static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rt_acct_proc_show, NULL);
}

static const struct file_operations rt_acct_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= rt_acct_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create("rt_cache", S_IRUGO, net->proc_net,
			  &rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", S_IRUGO,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */

static inline bool rt_is_expired(const struct rtable *rth)
{
	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}

void rt_cache_flush(struct net *net)
{
	rt_genid_bump_ipv4(net);
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	struct net_device *dev = dst->dev;
	const __be32 *pkey = daddr;
	const struct rtable *rt;
	struct neighbour *n;

	rt = (const struct rtable *) dst;
	if (rt->rt_gateway)
		pkey = (const __be32 *) &rt->rt_gateway;
	else if (skb)
		pkey = &ip_hdr(skb)->daddr;

	n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
	if (n)
		return n;
	return neigh_create(&arp_tbl, pkey, dev);
}

static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	struct net_device *dev = dst->dev;
	const __be32 *pkey = daddr;
	const struct rtable *rt;

	rt = (const struct rtable *)dst;
	if (rt->rt_gateway)
		pkey = (const __be32 *)&rt->rt_gateway;
	else if (!daddr ||
		 (rt->rt_flags &
		  (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL)))
		return;

	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
}

#define IP_IDENTS_SZ 2048u

static atomic_t *ip_idents __read_mostly;
static u32 *ip_tstamps __read_mostly;
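/* Note: ip_idents[] and ip_tstamps[] are parallel arrays of IP_IDENTS_SZ
 * buckets (allocated elsewhere at init time, not in this section); each
 * bucket keeps the last IP ID counter and the jiffies stamp of its last use,
 * which ip_idents_reserve() below uses to decide how much random
 * perturbation to add to the returned ID range.
 */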

/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
u32 ip_idents_reserve(u32 hash, int segs)
{
	u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
	atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
	u32 old = ACCESS_ONCE(*p_tstamp);
	u32 now = (u32)jiffies;
	u32 new, delta = 0;

	if (old != now && cmpxchg(p_tstamp, old, now) == old)
		delta = prandom_u32_max(now - old);

	/* Do not use atomic_add_return() as it makes UBSAN unhappy */
	do {
		old = (u32)atomic_read(p_id);
		new = old + delta + segs;
	} while (atomic_cmpxchg(p_id, old, new) != old);

	return new - segs;
512 513
}
EXPORT_SYMBOL(ip_idents_reserve);
L
515
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
L
E
	u32 hash, id;
L
E
L
522 523
	hash = jhash_3words((__force u32)iph->daddr,
			    (__force u32)iph->saddr,
524
			    iph->protocol ^ net_hash_mix(net),
525
			    ip_idents_hashrnd);
E
	iph->id = htons(id);
L
E
L
531 532
static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
			     const struct sock *sk,
533 534 535 536 537 538 539 540 541 542 543 544 545 546 547
			     const struct iphdr *iph,
			     int oif, u8 tos,
			     u8 prot, u32 mark, int flow_flags)
{
	if (sk) {
		const struct inet_sock *inet = inet_sk(sk);

		oif = sk->sk_bound_dev_if;
		mark = sk->sk_mark;
		tos = RT_CONN_FLAGS(sk);
		prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
	}
	flowi4_init_output(fl4, oif, mark, tos,
			   RT_SCOPE_UNIVERSE, prot,
			   flow_flags,
			   iph->daddr, iph->saddr, 0, 0,
			   sock_net_uid(net, sk));
}

static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
			       const struct sock *sk)
{
	const struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = ip_hdr(skb);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
}

static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
			   inet_sk_flowi_flags(sk),
			   daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
	rcu_read_unlock();
}

static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
				 const struct sk_buff *skb)
{
	if (skb)
		build_skb_flow_key(fl4, skb, sk);
	else
		build_sk_flow_key(fl4, sk);
}

static inline void rt_free(struct rtable *rt)
{
	call_rcu(&rt->dst.rcu_head, dst_rcu_free);
}

static DEFINE_SPINLOCK(fnhe_lock);

static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
	struct rtable *rt;

	rt = rcu_dereference(fnhe->fnhe_rth_input);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
		rt_free(rt);
	}
	rt = rcu_dereference(fnhe->fnhe_rth_output);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
		rt_free(rt);
	}
}

static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
{
	struct fib_nh_exception *fnhe, *oldest;

	oldest = rcu_dereference(hash->chain);
	for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
			oldest = fnhe;
	}
	fnhe_flush_routes(oldest);
	return oldest;
}

static inline u32 fnhe_hashfun(__be32 daddr)
{
	static u32 fnhe_hashrnd __read_mostly;
	u32 hval;

	net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
	hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
	return hash_32(hval, FNHE_HASH_SHIFT);
}

static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
	rt->rt_pmtu = fnhe->fnhe_pmtu;
	rt->dst.expires = fnhe->fnhe_expires;

	if (fnhe->fnhe_gw) {
		rt->rt_flags |= RTCF_REDIRECTED;
		rt->rt_gateway = fnhe->fnhe_gw;
		rt->rt_uses_gateway = 1;
	}
}
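/* Note on nexthop exceptions (fnhe): each fib_nh can carry a small hash of
 * per-destination exceptions recording learned state such as a redirect
 * gateway or a PMTU value with its expiry.  update_or_create_fnhe() below
 * refreshes an existing entry or creates a new one (recycling the oldest
 * entry once a chain grows past FNHE_RECLAIM_DEPTH) and then marks the
 * routes cached on that nexthop obsolete so callers re-validate them.
 */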

static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
				  u32 pmtu, unsigned long expires)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe;
	struct rtable *rt;
	unsigned int i;
	int depth;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference(nh->nh_exceptions);
	if (!hash) {
		hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
		if (!hash)
			goto out_unlock;
		rcu_assign_pointer(nh->nh_exceptions, hash);
	}

	hash += hval;

	depth = 0;
	for (fnhe = rcu_dereference(hash->chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			break;
		depth++;
	}

	if (fnhe) {
		if (gw)
			fnhe->fnhe_gw = gw;
		if (pmtu) {
			fnhe->fnhe_pmtu = pmtu;
			fnhe->fnhe_expires = max(1UL, expires);
		}
		/* Update all cached dsts too */
		rt = rcu_dereference(fnhe->fnhe_rth_input);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
		rt = rcu_dereference(fnhe->fnhe_rth_output);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
	} else {
		if (depth > FNHE_RECLAIM_DEPTH)
			fnhe = fnhe_oldest(hash);
		else {
			fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
			if (!fnhe)
				goto out_unlock;

			fnhe->fnhe_next = hash->chain;
			rcu_assign_pointer(hash->chain, fnhe);
		}
		fnhe->fnhe_genid = fnhe_genid(dev_net(nh->nh_dev));
		fnhe->fnhe_daddr = daddr;
		fnhe->fnhe_gw = gw;
		fnhe->fnhe_pmtu = pmtu;
		fnhe->fnhe_expires = expires;

		/* Exception created; mark the cached routes for the nexthop
		 * stale, so anyone caching it rechecks if this exception
		 * applies to them.
		 */
		rt = rcu_dereference(nh->nh_rth_input);
		if (rt)
			rt->dst.obsolete = DST_OBSOLETE_KILL;

		for_each_possible_cpu(i) {
			struct rtable __rcu **prt;
			prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i);
			rt = rcu_dereference(*prt);
			if (rt)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
		}
	}

	fnhe->fnhe_stamp = jiffies;

out_unlock:
	spin_unlock_bh(&fnhe_lock);
}

static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
			     bool kill_route)
{
	__be32 new_gw = icmp_hdr(skb)->un.gateway;
	__be32 old_gw = ip_hdr(skb)->saddr;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct fib_result res;
	struct neighbour *n;
	struct net *net;

	switch (icmp_hdr(skb)->code & 7) {
	case ICMP_REDIR_NET:
	case ICMP_REDIR_NETTOS:
	case ICMP_REDIR_HOST:
	case ICMP_REDIR_HOSTTOS:
		break;

	default:
		return;
	}

	if (rt->rt_gateway != old_gw)
		return;

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
	if (!n)
		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
	if (!IS_ERR(n)) {
		if (!(n->nud_state & NUD_VALID)) {
			neigh_event_send(n, NULL);
		} else {
			if (fib_lookup(net, fl4, &res, 0) == 0) {
				struct fib_nh *nh = &FIB_RES_NH(res);

				update_or_create_fnhe(nh, fl4->daddr, new_gw,
						0, jiffies + ip_rt_gc_timeout);
			}
			if (kill_route)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
		}
		neigh_release(n);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev)) {
		const struct iphdr *iph = (const struct iphdr *) skb->data;
		__be32 daddr = iph->daddr;
		__be32 saddr = iph->saddr;

		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
				     "  Advised path = %pI4 -> %pI4\n",
				     &old_gw, dev->name, &new_gw,
				     &saddr, &daddr);
	}
#endif
	;
}

static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt;
	struct flowi4 fl4;
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct net *net = dev_net(skb->dev);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	rt = (struct rtable *) dst;

	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
	__ip_do_redirect(rt, skb, &fl4, true);
}

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   rt->dst.expires) {
			ip_rt_put(rt);
			ret = NULL;
		}
	}
	return ret;
}

/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route, and we start sending redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
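/* Worked example with the defaults above (a sketch, in jiffies): after k
 * redirects have been sent (peer->rate_tokens == k, k >= 1), the next one is
 * only sent once ip_rt_redirect_load << k jiffies have elapsed since
 * rate_last, i.e. HZ/25, HZ/12, ...  After ip_rt_redirect_number (9)
 * redirects the host is assumed deaf until it stays quiet for
 * ip_rt_redirect_silence ((HZ/50) << 10, roughly 20 seconds), at which point
 * rate_tokens is reset and the backoff sequence starts over.
 */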

void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	struct net *net;
	int log_martians;
	int vif;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
	rcu_read_unlock();

	net = dev_net(rt->dst.dev);
	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
	if (!peer) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
			  rt_nexthop(rt, ip_hdr(skb)->daddr));
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
		peer->rate_tokens = 0;

	/* Too many ignored redirects; do not send anything
	 * set dst.rate_last to the last seen redirected packet.
	 */
	if (peer->rate_tokens >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		goto out_put_peer;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->rate_tokens == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->rate_tokens)))) {
		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
		peer->rate_last = jiffies;
		++peer->rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (log_martians &&
		    peer->rate_tokens == ip_rt_redirect_number)
			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
					     &ip_hdr(skb)->saddr, inet_iif(skb),
					     &ip_hdr(skb)->daddr, &gw);
#endif
	}
out_put_peer:
	inet_putpeer(peer);
}

static int ip_error(struct sk_buff *skb)
{
	struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
	struct rtable *rt = skb_rtable(skb);
	struct inet_peer *peer;
	unsigned long now;
	struct net *net;
	bool send;
	int code;

	/* IP on this device is disabled. */
	if (!in_dev)
		goto out;

	net = dev_net(rt->dst.dev);
	if (!IN_DEV_FORWARD(in_dev)) {
		switch (rt->dst.error) {
		case EHOSTUNREACH:
			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
			break;

		case ENETUNREACH:
			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
			break;
		}
		goto out;
	}

	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
			       l3mdev_master_ifindex(skb->dev), 1);

	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
		inet_putpeer(peer);
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb(skb);
	return 0;
}

static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
	struct dst_entry *dst = &rt->dst;
	struct fib_result res;

	if (dst_metric_locked(dst, RTAX_MTU))
		return;

	if (ipv4_mtu(dst) < mtu)
		return;

	if (mtu < ip_rt_min_pmtu)
		mtu = ip_rt_min_pmtu;

	if (rt->rt_pmtu == mtu &&
	    time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
		return;

	rcu_read_lock();
	if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
		struct fib_nh *nh = &FIB_RES_NH(res);

		update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
				      jiffies + ip_rt_mtu_expires);
	}
	rcu_read_unlock();
}

static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu)
{
	struct rtable *rt = (struct rtable *) dst;
	struct flowi4 fl4;

	ip_rt_build_flow_key(&fl4, sk, skb);
	__ip_rt_update_pmtu(rt, &fl4, mtu);
}

void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
		      int oif, u32 mark, u8 protocol, int flow_flags)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	if (!mark)
		mark = IP4_REPLY_MARK(net, skb->mark);

	__build_flow_key(net, &fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, mark, flow_flags);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);

static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);

	if (!fl4.flowi4_mark)
		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);

	rt = __ip_route_output_key(sock_net(sk), &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}

void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct dst_entry *odst = NULL;
	bool new = false;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);

	if (!ip_sk_accept_pmtu(sk))
		goto out;

	odst = sk_dst_get(sk);

	if (sock_owned_by_user(sk) || !odst) {
		__ipv4_sk_update_pmtu(skb, sk, mtu);
		goto out;
	}

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);

	rt = (struct rtable *)odst;
	if (odst->obsolete && !odst->ops->check(odst, 0)) {
		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);

	if (!dst_check(&rt->dst, 0)) {
		if (new)
			dst_release(&rt->dst);

		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	if (new)
		sk_dst_set(sk, &rt->dst);

out:
	bh_unlock_sock(sk);
	dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);

void ipv4_redirect(struct sk_buff *skb, struct net *net,
		   int oif, u32 mark, u8 protocol, int flow_flags)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(net, &fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, mark, flow_flags);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_redirect);

void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct net *net = sock_net(sk);

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rtable *rt = (struct rtable *) dst;

	/* All IPV4 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 *
	 * When a PMTU/redirect information update invalidates a route,
	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
	 * DST_OBSOLETE_DEAD by dst_free().
	 */
	if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
		return NULL;
	return dst;
}

static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

	rt = skb_rtable(skb);
	if (rt)
		dst_set_expires(&rt->dst, 0);
}

static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	pr_debug("%s: %pI4 -> %pI4, %s\n",
		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		 skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	WARN_ON(1);
	return 0;
}

/*
   We do not cache the source address of the outgoing interface,
   because it is used only by the IP RR, TS and SRR options,
   so it stays out of the fast path.

   BTW remember: "addr" is allowed to be not aligned
   in IP options!
 */

void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;

	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct flowi4 fl4;
		struct iphdr *iph;

		iph = ip_hdr(skb);

		memset(&fl4, 0, sizeof(fl4));
		fl4.daddr = iph->daddr;
		fl4.saddr = iph->saddr;
		fl4.flowi4_tos = RT_TOS(iph->tos);
		fl4.flowi4_oif = rt->dst.dev->ifindex;
		fl4.flowi4_iif = skb->dev->ifindex;
		fl4.flowi4_mark = skb->mark;

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
			src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
		else
			src = inet_select_addr(rt->dst.dev,
					       rt_nexthop(rt, iph->daddr),
					       RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}

#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif

static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
	unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
	unsigned int advmss = max_t(unsigned int, dst->dev->mtu - header_size,
				    ip_rt_min_advmss);

	return min(advmss, IPV4_MAX_PMTU - header_size);
}

static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *) dst;
	unsigned int mtu = rt->rt_pmtu;

	if (!mtu || time_after_eq(jiffies, rt->dst.expires))
		mtu = dst_metric_raw(dst, RTAX_MTU);

	if (mtu)
		return mtu;

	mtu = dst->dev->mtu;

	if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
		if (rt->rt_uses_gateway && mtu > 576)
			mtu = 576;
	}

	mtu = min_t(unsigned int, mtu, IP_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
{
	struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
	struct fib_nh_exception *fnhe;
	u32 hval;

	if (!hash)
		return NULL;

	hval = fnhe_hashfun(daddr);

	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			return fnhe;
	}
	return NULL;
}

static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
			      __be32 daddr)
{
	bool ret = false;

	spin_lock_bh(&fnhe_lock);

	if (daddr == fnhe->fnhe_daddr) {
		struct rtable __rcu **porig;
		struct rtable *orig;
		int genid = fnhe_genid(dev_net(rt->dst.dev));

		if (rt_is_input_route(rt))
			porig = &fnhe->fnhe_rth_input;
		else
			porig = &fnhe->fnhe_rth_output;
		orig = rcu_dereference(*porig);

		if (fnhe->fnhe_genid != genid) {
			fnhe->fnhe_genid = genid;
			fnhe->fnhe_gw = 0;
			fnhe->fnhe_pmtu = 0;
			fnhe->fnhe_expires = 0;
			fnhe_flush_routes(fnhe);
			orig = NULL;
		}
		fill_route_from_fnhe(rt, fnhe);
		if (!rt->rt_gateway)
			rt->rt_gateway = daddr;

		if (!(rt->dst.flags & DST_NOCACHE)) {
			rcu_assign_pointer(*porig, rt);
			if (orig)
				rt_free(orig);
			ret = true;
		}

		fnhe->fnhe_stamp = jiffies;
	}
	spin_unlock_bh(&fnhe_lock);

	return ret;
}

static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
{
	struct rtable *orig, *prev, **p;
	bool ret = true;

	if (rt_is_input_route(rt)) {
		p = (struct rtable **)&nh->nh_rth_input;
	} else {
		p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output);
	}
	orig = *p;

	prev = cmpxchg(p, orig, rt);
	if (prev == orig) {
		if (orig)
			rt_free(orig);
	} else
		ret = false;

	return ret;
}

struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);

static void rt_add_uncached_list(struct rtable *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);

	rt->rt_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
	struct rtable *rt = (struct rtable *) dst;

	if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt))
		kfree(p);

	if (!list_empty(&rt->rt_uncached)) {
		struct uncached_list *ul = rt->rt_uncached_list;

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt_uncached);
		spin_unlock_bh(&ul->lock);
	}
}

void rt_flush_dev(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct rtable *rt;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt_uncached) {
			if (rt->dst.dev != dev)
				continue;
			rt->dst.dev = net->loopback_dev;
			dev_hold(rt->dst.dev);
			dev_put(dev);
		}
		spin_unlock_bh(&ul->lock);
	}
}

static bool rt_cache_valid(const struct rtable *rt)
{
	return	rt &&
		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
		!rt_is_expired(rt);
}
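/* rt_set_nexthop() below copies nexthop-derived state (gateway, metrics,
 * tclassid, lwtunnel state) from the FIB result into a freshly allocated
 * rtable and then tries to cache it, either in the matching nexthop
 * exception or in the nexthop's input/per-cpu output slot; routes that
 * cannot be cached are flagged DST_NOCACHE and added to the uncached list.
 */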

static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
			   const struct fib_result *res,
			   struct fib_nh_exception *fnhe,
			   struct fib_info *fi, u16 type, u32 itag)
{
	bool cached = false;

	if (fi) {
		struct fib_nh *nh = &FIB_RES_NH(*res);

		if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) {
			rt->rt_gateway = nh->nh_gw;
			rt->rt_uses_gateway = 1;
		}
		dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
		if (fi->fib_metrics != &dst_default_metrics) {
			rt->dst._metrics |= DST_METRICS_REFCOUNTED;
			atomic_inc(&fi->fib_metrics->refcnt);
		}
#ifdef CONFIG_IP_ROUTE_CLASSID
		rt->dst.tclassid = nh->nh_tclassid;
#endif
		rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
		if (unlikely(fnhe))
			cached = rt_bind_exception(rt, fnhe, daddr);
		else if (!(rt->dst.flags & DST_NOCACHE))
			cached = rt_cache_route(nh, rt);
		if (unlikely(!cached)) {
			/* Routes we intend to cache in nexthop exception or
			 * FIB nexthop have the DST_NOCACHE bit clear.
			 * However, if we are unsuccessful at storing this
			 * route into the cache we really need to set it.
			 */
			rt->dst.flags |= DST_NOCACHE;
			if (!rt->rt_gateway)
				rt->rt_gateway = daddr;
			rt_add_uncached_list(rt);
		}
	} else
		rt_add_uncached_list(rt);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, res->tclassid);
#endif
	set_class_tag(rt, itag);
#endif
}

struct rtable *rt_dst_alloc(struct net_device *dev,
			    unsigned int flags, u16 type,
			    bool nopolicy, bool noxfrm, bool will_cache)
{
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
		       (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) |
		       (nopolicy ? DST_NOPOLICY : 0) |
		       (noxfrm ? DST_NOXFRM : 0));

	if (rt) {
		rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		rt->rt_flags = flags;
		rt->rt_type = type;
		rt->rt_is_input = 0;
		rt->rt_iif = 0;
		rt->rt_pmtu = 0;
		rt->rt_gateway = 0;
		rt->rt_uses_gateway = 0;
		rt->rt_table_id = 0;
		INIT_LIST_HEAD(&rt->rt_uncached);

		rt->dst.output = ip_output;
		if (flags & RTCF_LOCAL)
			rt->dst.input = ip_local_deliver;
	}

	return rt;
}
EXPORT_SYMBOL(rt_dst_alloc);

/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
				u8 tos, struct net_device *dev, int our)
{
	struct rtable *rth;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	unsigned int flags = RTCF_MULTICAST;
	u32 itag = 0;
	int err;

	/* Primary sanity checks. */

	if (!in_dev)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
		goto e_inval;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr))
			goto e_inval;
	} else {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, &itag);
		if (err < 0)
			goto e_err;
	}
	if (our)
		flags |= RTCF_LOCAL;

	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
	if (!rth)
		goto e_nobufs;

#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->dst.output = ip_rt_bug;
	rth->rt_is_input= 1;

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	skb_dst_set(skb, &rth->dst);
	return 0;

e_nobufs:
	return -ENOBUFS;
e_inval:
	return -EINVAL;
e_err:
	return err;
}


static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation, if source is martian,
		 *	the only hint is MAC header.
		 */
		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			print_hex_dump(KERN_WARNING, "ll header: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       skb_mac_header(skb),
				       dev->hard_header_len, true);
		}
	}
#endif
}

static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference_protected(nh->nh_exceptions,
					 lockdep_is_held(&fnhe_lock));
	hash += hval;

	fnhe_p = &hash->chain;
	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
	while (fnhe) {
		if (fnhe->fnhe_daddr == daddr) {
			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
			fnhe_flush_routes(fnhe);
			kfree_rcu(fnhe, rcu);
			break;
		}
		fnhe_p = &fnhe->fnhe_next;
		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
						 lockdep_is_held(&fnhe_lock));
	}

	spin_unlock_bh(&fnhe_lock);
}

static void set_lwt_redirect(struct rtable *rth)
{
	if (lwtunnel_output_redirect(rth->dst.lwtstate)) {
		rth->dst.lwtstate->orig_output = rth->dst.output;
		rth->dst.output = lwtunnel_output;
	}

	if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
		rth->dst.lwtstate->orig_input = rth->dst.input;
		rth->dst.input = lwtunnel_input;
	}
}

/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
			   const struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos)
{
	struct fib_nh_exception *fnhe;
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	bool do_cache;
	u32 itag = 0;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
	if (!out_dev) {
		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
		return -EINVAL;
	}

	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, in_dev, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	do_cache = res->fi && !itag;
	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
	    skb->protocol == htons(ETH_P_IP) &&
	    (IN_DEV_SHARED_MEDIA(out_dev) ||
	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
		IPCB(skb)->flags |= IPSKB_DOREDIRECT;

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create route, if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 *
		 * Proxy arp feature have been extended to allow, ARP
		 * replies back to the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	fnhe = find_exception(&FIB_RES_NH(*res), daddr);
	if (do_cache) {
		if (fnhe) {
			rth = rcu_dereference(fnhe->fnhe_rth_input);
			if (rth && rth->dst.expires &&
			    time_after(jiffies, rth->dst.expires)) {
				ip_del_fnhe(&FIB_RES_NH(*res), daddr);
				fnhe = NULL;
			} else {
				goto rt_cache;
			}
		}

		rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);

rt_cache:
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			goto out;
		}
	}

	rth = rt_dst_alloc(out_dev->dev, 0, res->type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	rth->rt_is_input = 1;
	if (res->table)
		rth->rt_table_id = res->table->tb_id;
	RT_CACHE_STAT_INC(in_slow_tot);

	rth->dst.input = ip_forward;

	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
	set_lwt_redirect(rth);
	skb_dst_set(skb, &rth->dst);
out:
	err = 0;
 cleanup:
	return err;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* To make ICMP packets follow the right flow, the multipath hash is
 * calculated from the inner IP addresses.
 */
static void ip_multipath_l3_keys(const struct sk_buff *skb,
				 struct flow_keys *hash_keys)
{
	const struct iphdr *outer_iph = ip_hdr(skb);
	const struct iphdr *inner_iph;
	const struct icmphdr *icmph;
	struct iphdr _inner_iph;
	struct icmphdr _icmph;

	hash_keys->addrs.v4addrs.src = outer_iph->saddr;
	hash_keys->addrs.v4addrs.dst = outer_iph->daddr;
	if (likely(outer_iph->protocol != IPPROTO_ICMP))
		return;

	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
		return;

	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
				   &_icmph);
	if (!icmph)
		return;

	if (icmph->type != ICMP_DEST_UNREACH &&
	    icmph->type != ICMP_REDIRECT &&
	    icmph->type != ICMP_TIME_EXCEEDED &&
	    icmph->type != ICMP_PARAMETERPROB)
		return;

	inner_iph = skb_header_pointer(skb,
				       outer_iph->ihl * 4 + sizeof(_icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		return;
	hash_keys->addrs.v4addrs.src = inner_iph->saddr;
	hash_keys->addrs.v4addrs.dst = inner_iph->daddr;
}
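/* Note: fib_multipath_hash() below supports two hash policies selected by
 * net->ipv4.sysctl_fib_multipath_hash_policy: policy 0 hashes only the L3
 * source/destination addresses (using the inner header for ICMP errors via
 * ip_multipath_l3_keys() above), while policy 1 hashes the L4 five-tuple
 * when an skb or flow key is available.
 */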

/* if skb is set it will be used and fl4 can be NULL */
int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
		       const struct sk_buff *skb)
{
	struct net *net = fi->fib_net;
	struct flow_keys hash_keys;
	u32 mhash;

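	/* fib_multipath_hash_policy 0 hashes on the L3 addresses only;
	 * policy 1 hashes on the L4 five-tuple (addresses, ports, protocol).
	 */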
	switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		if (skb) {
			ip_multipath_l3_keys(skb, &hash_keys);
		} else {
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
		}
		break;
	case 1:
		/* skb is currently provided only when forwarding */
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;
			memset(&hash_keys, 0, sizeof(hash_keys));
			skb_flow_dissect_flow_keys(skb, &keys, flag);
			hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
			hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
			hash_keys.ports.src = keys.ports.src;
			hash_keys.ports.dst = keys.ports.dst;
			hash_keys.basic.ip_proto = keys.basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
			hash_keys.ports.src = fl4->fl4_sport;
			hash_keys.ports.dst = fl4->fl4_dport;
			hash_keys.basic.ip_proto = fl4->flowi4_proto;
		}
		break;
	}
	mhash = flow_hash_from_keys(&hash_keys);

	return mhash >> 1;
}
EXPORT_SYMBOL_GPL(fib_multipath_hash);
#endif /* CONFIG_IP_ROUTE_MULTIPATH */

static int ip_mkroute_input(struct sk_buff *skb,
			    struct fib_result *res,
			    struct in_device *in_dev,
			    __be32 daddr, __be32 saddr, u32 tos)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
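	/* With more than one next hop, pick one from the flow hash so that
	 * packets of a single flow keep taking the same path.
	 */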
	if (res->fi && res->fi->fib_nhs > 1) {
		int h = fib_multipath_hash(res->fi, NULL, skb);

		fib_select_multipath(res, h);
	}
#endif

	/* create a routing cache entry */
	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}

/*
 *	NOTE. We drop all packets that have local source
 *	addresses, because every properly looped-back packet
 *	must have the correct destination already attached by the output routine.
 *
 *	Such an approach solves two big problems:
 *	1. Non-simplex devices are handled properly.
 *	2. IP spoofing attempts are filtered with a 100% guarantee.
 *
 *	called with rcu_read_lock()
 */

static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       u8 tos, struct net_device *dev,
			       struct fib_result *res)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct ip_tunnel_info *tun_info;
	struct flowi4	fl4;
	unsigned int	flags = 0;
	u32		itag = 0;
	struct rtable	*rth;
	int		err = -EINVAL;
	struct net    *net = dev_net(dev);
	bool do_cache;

	/* IP on this device is disabled. */

	if (!in_dev)
		goto out;

	/* Check for the most weird martians, which can be not detected
	   by fib_lookup.
	 */

	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
	else
		fl4.flowi4_tun_key.tun_id = 0;
	skb_dst_drop(skb);

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
		goto martian_source;

	res->fi = NULL;
	res->table = NULL;
	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only to limited broadcast;
	 * I do not even know whether to fix it or not. Waiting for complaints :-)
	 */
	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_zeronet(daddr))
		goto martian_destination;

	/* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
	 * and calls it only once when daddr and/or saddr are loopback addresses.
	 */
	if (ipv4_is_loopback(daddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_destination;
	} else if (ipv4_is_loopback(saddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_source;
	}

	/*
	 *	Now we are ready to route packet.
	 */
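	/* Build the flow key and consult the FIB; the result type
	 * (broadcast, local, unicast) selects the handling below.
	 */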
	fl4.flowi4_oif = 0;
	fl4.flowi4_iif = dev->ifindex;
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_tos = tos;
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
	fl4.flowi4_flags = 0;
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	fl4.flowi4_uid = sock_net_uid(net, NULL);
	err = fib_lookup(net, &fl4, res, 0);
	if (err != 0) {
		if (!IN_DEV_FORWARD(in_dev))
			err = -EHOSTUNREACH;
		goto no_route;
	}

	if (res->type == RTN_BROADCAST)
		goto brd_input;

	if (res->type == RTN_LOCAL) {
		err = fib_validate_source(skb, saddr, daddr, tos,
					  0, dev, in_dev, &itag);
		if (err < 0)
			goto martian_source;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev)) {
		err = -EHOSTUNREACH;
		goto no_route;
	}
	if (res->type != RTN_UNICAST)
		goto martian_destination;

	err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos);
out:	return err;

brd_input:
	if (skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (!ipv4_is_zeronet(saddr)) {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, &itag);
		if (err < 0)
			goto martian_source;
	}
	flags |= RTCF_BROADCAST;
	res->type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
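	/* For local delivery, reuse the per-nexthop cached input route when
	 * source validation produced no tclassid (itag == 0).
	 */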
	do_cache = false;
	if (res->fi) {
		if (!itag) {
			rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
			if (rt_cache_valid(rth)) {
				skb_dst_set_noref(skb, &rth->dst);
				err = 0;
				goto out;
			}
			do_cache = true;
		}
	}

	rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
			   flags | RTCF_LOCAL, res->type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
	if (!rth)
		goto e_nobufs;

	rth->dst.output= ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->rt_is_input = 1;
	if (res->table)
		rth->rt_table_id = res->table->tb_id;

	RT_CACHE_STAT_INC(in_slow_tot);
	if (res->type == RTN_UNREACHABLE) {
		rth->dst.input= ip_error;
		rth->dst.error= -err;
		rth->rt_flags 	&= ~RTCF_LOCAL;
	}

	if (do_cache) {
		struct fib_nh *nh = &FIB_RES_NH(*res);

		rth->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
		if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
			WARN_ON(rth->dst.input == lwtunnel_input);
			rth->dst.lwtstate->orig_input = rth->dst.input;
			rth->dst.input = lwtunnel_input;
		}

		if (unlikely(!rt_cache_route(nh, rth))) {
			rth->dst.flags |= DST_NOCACHE;
			rt_add_uncached_list(rth);
		}
	}
	skb_dst_set(skb, &rth->dst);
	err = 0;
	goto out;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	res->type = RTN_UNREACHABLE;
	res->fi = NULL;
	res->table = NULL;
	goto local_input;

	/*
	 *	Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev))
		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
				     &daddr, &saddr, dev->name);
#endif

e_inval:
	err = -EINVAL;
	goto out;

e_nobufs:
	err = -ENOBUFS;
	goto out;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto out;
}

int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			 u8 tos, struct net_device *dev)
{
	struct fib_result res;
	int err;

	tos &= IPTOS_RT_MASK;
	rcu_read_lock();
	err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(ip_route_input_noref);

/* called with rcu_read_lock held */
int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		       u8 tos, struct net_device *dev, struct fib_result *res)
{
	/* Multicast recognition logic is moved from route cache to here.
	   The problem was that too many Ethernet cards have broken/missing
	   hardware multicast filters :-( As a result a host on a multicasting
	   network acquires a lot of useless route cache entries, sort of
	   SDR messages from all over the world. Now we try to get rid of them.
	   Really, provided the software IP multicast filter is organized
	   reasonably (at least, hashed), it does not result in a slowdown
	   compared with route cache reject entries.
	   Note that multicast routers are not affected, because a
	   route cache entry is created eventually.
	 */
	if (ipv4_is_multicast(daddr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);
		int our = 0;
		int err = -EINVAL;

		if (in_dev)
			our = ip_check_mc_rcu(in_dev, daddr, saddr,
					      ip_hdr(skb)->protocol);

		/* check l3 master if no match yet */
		if ((!in_dev || !our) && netif_is_l3_slave(dev)) {
			struct in_device *l3_in_dev;

			l3_in_dev = __in_dev_get_rcu(skb->dev);
			if (l3_in_dev)
				our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
						      ip_hdr(skb)->protocol);
		}

		if (our
#ifdef CONFIG_IP_MROUTE
			||
		    (!ipv4_is_local_multicast(daddr) &&
		     IN_DEV_MFORWARD(in_dev))
#endif
		   ) {
			err = ip_route_input_mc(skb, daddr, saddr,
						tos, dev, our);
		}
		return err;
	}

	return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
}

/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
				       const struct flowi4 *fl4, int orig_oif,
				       struct net_device *dev_out,
				       unsigned int flags)
{
	struct fib_info *fi = res->fi;
	struct fib_nh_exception *fnhe;
	struct in_device *in_dev;
	u16 type = res->type;
	struct rtable *rth;
	bool do_cache;

	in_dev = __in_dev_get_rcu(dev_out);
	if (!in_dev)
		return ERR_PTR(-EINVAL);

	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
		if (ipv4_is_loopback(fl4->saddr) &&
		    !(dev_out->flags & IFF_LOOPBACK) &&
		    !netif_is_l3_master(dev_out))
			return ERR_PTR(-EINVAL);

	if (ipv4_is_lbcast(fl4->daddr))
		type = RTN_BROADCAST;
	else if (ipv4_is_multicast(fl4->daddr))
		type = RTN_MULTICAST;
	else if (ipv4_is_zeronet(fl4->daddr))
		return ERR_PTR(-EINVAL);

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	do_cache = true;
	if (type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
		fi = NULL;
	} else if (type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST | RTCF_LOCAL;
		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
				     fl4->flowi4_proto))
			flags &= ~RTCF_LOCAL;
		else
			do_cache = false;
		/* If a multicast route does not exist, use
		 * the default one, but do not gateway in this case.
		 * Yes, it is a hack.
		 */
		if (fi && res->prefixlen < 4)
			fi = NULL;
	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
		   (orig_oif != dev_out->ifindex)) {
		/* For local routes that require a particular output interface
		 * we do not want to cache the result.  Caching the result
		 * causes incorrect behaviour when there are multiple source
		 * addresses on the interface, the end result being that if the
		 * intended recipient is waiting on that interface for the
		 * packet he won't receive it because it will be delivered on
		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
		 * be set to the loopback interface as well.
		 */
		fi = NULL;
	}

	fnhe = NULL;
	do_cache &= fi != NULL;
	if (do_cache) {
		struct rtable __rcu **prth;
		struct fib_nh *nh = &FIB_RES_NH(*res);

		fnhe = find_exception(nh, fl4->daddr);
		if (fnhe) {
			prth = &fnhe->fnhe_rth_output;
			rth = rcu_dereference(*prth);
			if (rth && rth->dst.expires &&
			    time_after(jiffies, rth->dst.expires)) {
				ip_del_fnhe(nh, fl4->daddr);
				fnhe = NULL;
			} else {
				goto rt_cache;
			}
		}

		if (unlikely(fl4->flowi4_flags &
			     FLOWI_FLAG_KNOWN_NH &&
			     !(nh->nh_gw &&
			       nh->nh_scope == RT_SCOPE_LINK))) {
			do_cache = false;
			goto add;
		}
		prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
		rth = rcu_dereference(*prth);

rt_cache:
		if (rt_cache_valid(rth)) {
			dst_hold(&rth->dst);
			return rth;
		}
	}

add:
	rth = rt_dst_alloc(dev_out, flags, type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(in_dev, NOXFRM),
			   do_cache);
	if (!rth)
		return ERR_PTR(-ENOBUFS);

	rth->rt_iif	= orig_oif ? : 0;
	if (res->table)
		rth->rt_table_id = res->table->tb_id;

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !ipv4_is_local_multicast(fl4->daddr)) {
				rth->dst.input = ip_mr_input;
				rth->dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
	set_lwt_redirect(rth);

	return rth;
}

/*
 * Major route resolver routine.
 */

struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
					const struct sk_buff *skb)
{
	__u8 tos = RT_FL_TOS(fl4);
	struct fib_result res;
	struct rtable *rth;

	res.tclassid	= 0;
	res.fi		= NULL;
	res.table	= NULL;

	fl4->flowi4_iif = LOOPBACK_IFINDEX;
	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
			 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

	rcu_read_lock();
	rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
	rcu_read_unlock();

	return rth;
}
EXPORT_SYMBOL_GPL(ip_route_output_key_hash);

struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
					    struct fib_result *res,
					    const struct sk_buff *skb)
{
	struct net_device *dev_out = NULL;
	int orig_oif = fl4->flowi4_oif;
	unsigned int flags = 0;
	struct rtable *rth;
	int err = -ENETUNREACH;

	if (fl4->saddr) {
		rth = ERR_PTR(-EINVAL);
		if (ipv4_is_multicast(fl4->saddr) ||
		    ipv4_is_lbcast(fl4->saddr) ||
		    ipv4_is_zeronet(fl4->saddr))
			goto out;

		/* I removed check for oif == dev_out->oif here.
		   It was wrong for two reasons:
		   1. ip_dev_find(net, saddr) can return wrong iface, if saddr
		      is assigned to multiple interfaces.
		   2. Moreover, we are allowed to send packets with saddr
		      of another iface. --ANK
		 */

		if (fl4->flowi4_oif == 0 &&
		    (ipv4_is_multicast(fl4->daddr) ||
		     ipv4_is_lbcast(fl4->daddr))) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = __ip_dev_find(net, fl4->saddr, false);
			if (!dev_out)
				goto out;

			/* Special hack: user can direct multicasts
			   and limited broadcast via necessary interface
			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			   This hack is not just for fun, it allows
			   vic,vat and friends to work.
			   They bind socket to loopback, set ttl to zero
			   and expect that it will work.
			   From the viewpoint of routing cache they are broken,
			   because we are not allowed to build multicast path
			   with loopback source addr (look, routing cache
			   cannot know, that ttl is zero, so that packet
			   will not leave this host and route is valid).
			   Luckily, this hack is good workaround.
			 */

			fl4->flowi4_oif = dev_out->ifindex;
			goto make_route;
		}

		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			if (!__ip_dev_find(net, fl4->saddr, false))
				goto out;
		}
	}


	if (fl4->flowi4_oif) {
		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
		rth = ERR_PTR(-ENODEV);
		if (!dev_out)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
			rth = ERR_PTR(-ENETUNREACH);
			goto out;
		}
		if (ipv4_is_local_multicast(fl4->daddr) ||
		    ipv4_is_lbcast(fl4->daddr) ||
		    fl4->flowi4_proto == IPPROTO_IGMP) {
			if (!fl4->saddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		if (!fl4->saddr) {
			if (ipv4_is_multicast(fl4->daddr))
				fl4->saddr = inet_select_addr(dev_out, 0,
							      fl4->flowi4_scope);
			else if (!fl4->daddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}

	if (!fl4->daddr) {
		fl4->daddr = fl4->saddr;
		if (!fl4->daddr)
			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = LOOPBACK_IFINDEX;
		res->type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	err = fib_lookup(net, fl4, res, 0);
	if (err) {
		res->fi = NULL;
		res->table = NULL;
		if (fl4->flowi4_oif &&
		    (ipv4_is_multicast(fl4->daddr) ||
		    !netif_index_is_l3_master(net, fl4->flowi4_oif))) {
			/* Apparently, routing tables are wrong. Assume,
			   that the destination is on link.

			   WHY? DW.
			   Because we are allowed to send to iface
			   even if it has NO routes and NO assigned
			   addresses. When oif is specified, routing
			   tables are looked up with only one purpose:
			   to catch if destination is gatewayed, rather than
			   direct. Moreover, if MSG_DONTROUTE is set,
			   we send packet, ignoring both routing tables
			   and ifaddr state. --ANK


			   We could make it even if oif is unknown,
			   likely IPv6, but we do not.
			 */

			if (fl4->saddr == 0)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res->type = RTN_UNICAST;
			goto make_route;
		}
		rth = ERR_PTR(err);
		goto out;
	}

	if (res->type == RTN_LOCAL) {
		if (!fl4->saddr) {
			if (res->fi->fib_prefsrc)
				fl4->saddr = res->fi->fib_prefsrc;
			else
				fl4->saddr = fl4->daddr;
		}

		/* L3 master device is the loopback for that domain */
		dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
			net->loopback_dev;
		fl4->flowi4_oif = dev_out->ifindex;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	fib_select_path(net, res, fl4, skb);

	dev_out = FIB_RES_DEV(*res);
	fl4->flowi4_oif = dev_out->ifindex;


make_route:
	rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);

out:
	return rth;
}

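/* Blackhole routes are stand-in dst entries (used e.g. by xfrm): their
 * dst_ops ignore PMTU updates and redirects, and any packet sent through
 * them is discarded.
 */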
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}

static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					  struct sk_buff *skb, u32 mtu)
{
}

static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				       struct sk_buff *skb)
{
}

static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
					  unsigned long old)
{
	return NULL;
}

static struct dst_ops ipv4_dst_blackhole_ops = {
	.family			=	AF_INET,
	.check			=	ipv4_blackhole_dst_check,
	.mtu			=	ipv4_blackhole_mtu,
	.default_advmss		=	ipv4_default_advmss,
	.update_pmtu		=	ipv4_rt_blackhole_update_pmtu,
	.redirect		=	ipv4_rt_blackhole_redirect,
	.cow_metrics		=	ipv4_rt_blackhole_cow_metrics,
	.neigh_lookup		=	ipv4_neigh_lookup,
};

struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rtable *ort = (struct rtable *) dst_orig;
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
	if (rt) {
		struct dst_entry *new = &rt->dst;

		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard_out;

		new->dev = net->loopback_dev;
		if (new->dev)
			dev_hold(new->dev);

		rt->rt_is_input = ort->rt_is_input;
		rt->rt_iif = ort->rt_iif;
		rt->rt_pmtu = ort->rt_pmtu;

		rt->rt_genid = rt_genid_ipv4(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_gateway = ort->rt_gateway;
		rt->rt_uses_gateway = ort->rt_uses_gateway;

		INIT_LIST_HEAD(&rt->rt_uncached);
		dst_free(new);
	}

	dst_release(dst_orig);

	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
}

struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
				    const struct sock *sk)
{
	struct rtable *rt = __ip_route_output_key(net, flp4);

	if (IS_ERR(rt))
		return rt;

	if (flp4->flowi4_proto)
		rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
							flowi4_to_flowi(flp4),
							sk, 0);

	return rt;
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);

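/* Fill a netlink RTM_NEWROUTE message describing @rt; used to answer
 * RTM_GETROUTE requests.
 */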
/* called with rcu_read_lock held */
static int rt_fill_info(struct net *net,  __be32 dst, __be32 src, u32 table_id,
			struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
			u32 seq)
{
	struct rtable *rt = skb_rtable(skb);
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned long expires = 0;
	u32 error;
	u32 metrics[RTAX_MAX];

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family	 = AF_INET;
	r->rtm_dst_len	= 32;
	r->rtm_src_len	= 0;
	r->rtm_tos	= fl4->flowi4_tos;
	r->rtm_table	= table_id < 256 ? table_id : RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, table_id))
		goto nla_put_failure;
	r->rtm_type	= rt->rt_type;
	r->rtm_scope	= RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;
	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
		r->rtm_flags |= RTCF_DOREDIRECT;

	if (nla_put_in_addr(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (src) {
		r->rtm_src_len = 32;
		if (nla_put_in_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
	}
	if (rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
		goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rt->dst.tclassid &&
	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
		goto nla_put_failure;
#endif
	if (!rt_is_input_route(rt) &&
	    fl4->saddr != src) {
		if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
			goto nla_put_failure;
	}
	if (rt->rt_uses_gateway &&
	    nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gateway))
		goto nla_put_failure;

	expires = rt->dst.expires;
	if (expires) {
		unsigned long now = jiffies;

		if (time_before(now, expires))
			expires -= now;
		else
			expires = 0;
	}

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	if (rt->rt_pmtu && expires)
		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

	if (fl4->flowi4_mark &&
	    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
		goto nla_put_failure;

	if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
	    nla_put_u32(skb, RTA_UID,
			from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
		goto nla_put_failure;

	error = rt->dst.error;

	if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
			int err = ipmr_get_route(net, skb,
						 fl4->saddr, fl4->daddr,
						 r, portid);

			if (err <= 0) {
				if (err == 0)
					return 0;
				goto nla_put_failure;
			}
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex))
				goto nla_put_failure;
	}

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	struct fib_result res = {};
	struct rtable *rt = NULL;
	struct flowi4 fl4;
	__be32 dst = 0;
	__be32 src = 0;
	u32 iif;
	int err;
	int mark;
	struct sk_buff *skb;
	u32 table_id = RT_TABLE_MAIN;
	kuid_t uid;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy,
			  extack);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers, this skb can pass
	   through a good chunk of the routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
	if (tb[RTA_UID])
		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
	else
		uid = (iif ? INVALID_UID : current_uid());

	/* Bugfix: need to give ip_route_input enough of an IP header to
	 * not gag.
	 */
	ip_hdr(skb)->protocol = IPPROTO_UDP;
	ip_hdr(skb)->saddr = src;
	ip_hdr(skb)->daddr = dst;

	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = dst;
	fl4.saddr = src;
	fl4.flowi4_tos = rtm->rtm_tos;
	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
	fl4.flowi4_mark = mark;
	fl4.flowi4_uid = uid;

	rcu_read_lock();

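	/* An input interface means we emulate reception of a packet on that
	 * device and use the input path; otherwise resolve an output route
	 * from the flow key.
	 */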
	if (iif) {
		struct net_device *dev;

		dev = dev_get_by_index_rcu(net, iif);
		if (!dev) {
			err = -ENODEV;
			goto errout_free;
		}

		skb->protocol	= htons(ETH_P_IP);
		skb->dev	= dev;
		skb->mark	= mark;
		err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
					 dev, &res);

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
	}

	if (err)
		goto errout_free;

	skb_dst_set(skb, &rt->dst);
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
		table_id = rt->rt_table_id;

	if (rtm->rtm_flags & RTM_F_FIB_MATCH)
		err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, RTM_NEWROUTE, table_id,
				    rt->rt_type, res.prefix, res.prefixlen,
				    fl4.flowi4_tos, res.fi, 0);
	else
		err = rt_fill_info(net, dst, src, table_id, &fl4, skb,
				   NETLINK_CB(in_skb).portid, nlh->nlmsg_seq);
	if (err < 0)
		goto errout_free;

	rcu_read_unlock();

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;

errout_free:
	rcu_read_unlock();
	kfree_skb(skb);
	goto errout;
}

void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev));
}

#ifdef CONFIG_SYSCTL
static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_gc_elasticity __read_mostly	= 8;

static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
					void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	struct net *net = (struct net *)__ctl->extra1;

	if (write) {
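		/* Writing any value flushes the cache: bump the per-netns
		 * route and next-hop-exception generation counters.
		 */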
		rt_cache_flush(net);
		fnhe_genid_bump(net);
		return 0;
	}

	return -EINVAL;
}

static struct ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/*  Deprecated. Use gc_min_interval_ms */

		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{ },
};

static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
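	/* Each non-initial namespace gets its own copy of the flush table;
	 * the entry is not exported to unprivileged user namespaces.
	 */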
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (!tbl)
			goto err_dup;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			tbl[0].procname = NULL;
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
	if (!net->ipv4.route_hdr)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif

static __net_init int rt_genid_init(struct net *net)
{
	atomic_set(&net->ipv4.rt_genid, 0);
	atomic_set(&net->fnhe_genid, 0);
	get_random_bytes(&net->ipv4.dev_addr_genid,
			 sizeof(net->ipv4.dev_addr_genid));
	return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};

static int __net_init ipv4_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv4.peers = bp;
	return 0;
}

static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv4.peers;

	net->ipv4.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init	=	ipv4_inetpeer_init,
	.exit	=	ipv4_inetpeer_exit,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */

int __init ip_rt_init(void)
{
	int rc = 0;
	int cpu;

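	/* ip_idents/ip_tstamps back IP identification generation; both hold
	 * IP_IDENTS_SZ entries and ip_idents is seeded with random bytes.
	 */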
	ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
	if (!ip_idents)
		panic("IP: failed to allocate ip_idents\n");

	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));

	ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
	if (!ip_tstamps)
		panic("IP: failed to allocate ip_tstamps\n");

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}
#ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;

	devinet_init();
	ip_fib_init();

	if (ip_rt_proc_init())
		pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init();
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	register_pernet_subsys(&ipv4_inetpeer_ops);
	return rc;
}

#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif