/*
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation; either version
 *   2 of the License, or (at your option) any later version.
 *
 *   Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
 *     & Swedish University of Agricultural Sciences.
 *
 *   Jens Laas <jens.laas@data.slu.se> Swedish University of
 *     Agricultural Sciences.
 *
 *   Hans Liss <hans.liss@its.uu.se>  Uppsala Universitet
 *
 * This work is based on the LPC-trie which is originally described in:
 *
 * An experimental study of compression methods for dynamic tries
 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
 * http://www.csc.kth.se/~snilsson/software/dyntrie2/
 *
 *
 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
 *
 *
 * Code from fib_hash has been reused which includes the following header:
 *
 *
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 FIB: lookup engine and maintenance routines.
 *
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Substantial contributions to this work comes from:
 *
 *		David S. Miller, <davem@davemloft.net>
 *		Stephen Hemminger <shemminger@osdl.org>
 *		Paul E. McKenney <paulmck@us.ibm.com>
 *		Patrick McHardy <kaber@trash.net>
 */

#define VERSION "0.409"

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/switchdev.h>
#include "fib_lookup.h"

#define MAX_STAT_DEPTH 32

#define KEYLENGTH	(8*sizeof(t_key))
#define KEY_MAX		((t_key)~0)

typedef unsigned int t_key;

#define IS_TNODE(n) ((n)->bits)
#define IS_LEAF(n) (!(n)->bits)

struct key_vector {
	struct rcu_head rcu;

	t_key empty_children; /* KEYLENGTH bits needed */
	t_key full_children;  /* KEYLENGTH bits needed */
	struct key_vector __rcu *parent;

	t_key key;
	unsigned char pos;		/* 2log(KEYLENGTH) bits needed */
	unsigned char bits;		/* 2log(KEYLENGTH) bits needed */
	unsigned char slen;
	union {
		/* This list pointer is valid if (pos | bits) == 0 (LEAF) */
		struct hlist_head leaf;
		/* This array is valid if (pos | bits) > 0 (TNODE) */
		struct key_vector __rcu *tnode[0];
	};
};

struct tnode {
	struct key_vector kv[1];
};

#define TNODE_SIZE(n)	offsetof(struct tnode, kv[0].tnode[n])
#define LEAF_SIZE	TNODE_SIZE(1)
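/* TNODE_SIZE(n) is thus the size of the key_vector header plus room for n
 * child pointers; LEAF_SIZE reserves a single slot, which is all a leaf
 * needs since the union above only has to hold the hlist_head.
 */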

#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats {
	unsigned int gets;
	unsigned int backtrack;
	unsigned int semantic_match_passed;
	unsigned int semantic_match_miss;
	unsigned int null_node_hit;
	unsigned int resize_node_skipped;
};
#endif

struct trie_stat {
	unsigned int totdepth;
	unsigned int maxdepth;
	unsigned int tnodes;
	unsigned int leaves;
	unsigned int nullpointers;
	unsigned int prefixes;
	unsigned int nodesizes[MAX_STAT_DEPTH];
};

struct trie {
	struct key_vector __rcu *tnode[1];
#ifdef CONFIG_IP_FIB_TRIE_STATS
	struct trie_use_stats __percpu *stats;
#endif
};

static struct key_vector __rcu **resize(struct trie *t, struct key_vector *tn);
static size_t tnode_free_size;

/*
 * synchronize_rcu after call_rcu for that many pages; it should be especially
 * useful before resizing the root node with PREEMPT_NONE configs; the value was
 * obtained experimentally, aiming to avoid visible slowdown.
 */
static const int sync_pages = 128;

static struct kmem_cache *fn_alias_kmem __read_mostly;
static struct kmem_cache *trie_leaf_kmem __read_mostly;

/* caller must hold RTNL */
#define node_parent(n) rtnl_dereference((n)->parent)
#define get_child(tn, i) rtnl_dereference((tn)->tnode[i])

/* caller must hold RCU read lock or RTNL */
#define node_parent_rcu(n) rcu_dereference_rtnl((n)->parent)
#define get_child_rcu(tn, i) rcu_dereference_rtnl((tn)->tnode[i])

/* wrapper for rcu_assign_pointer */
static inline void node_set_parent(struct key_vector *n, struct key_vector *tp)
{
	if (n)
		rcu_assign_pointer(n->parent, tp);
}

#define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER((n)->parent, p)

/* This provides us with the number of children in this node; in the case of
 * a leaf this will return 0, meaning none of the children are accessible.
 */
static inline unsigned long child_length(const struct key_vector *tn)
{
	return (1ul << tn->bits) & ~(1ul);
}

static inline unsigned long get_index(t_key key, struct key_vector *kv)
{
	unsigned long index = key ^ kv->key;

	return index >> kv->pos;
}

static inline struct fib_table *trie_get_table(struct trie *t)
{
	unsigned long *tb_data = (unsigned long *)t;

	return container_of(tb_data, struct fib_table, tb_data[0]);
}

/* To understand this stuff, an understanding of keys and all their bits is
 * necessary. Every node in the trie has a key associated with it, but not
 * all of the bits in that key are significant.
 *
 * Consider a node 'n' and its parent 'tp'.
 *
 * If n is a leaf, every bit in its key is significant. Its presence is
 * necessitated by path compression, since during a tree traversal (when
 * searching for a leaf - unless we are doing an insertion) we will completely
 * ignore all skipped bits we encounter. Thus we need to verify, at the end of
 * a potentially successful search, that we have indeed been walking the
 * correct key path.
 *
 * Note that we can never "miss" the correct key in the tree if present by
 * following the wrong path. Path compression ensures that segments of the key
 * that are the same for all keys with a given prefix are skipped, but the
 * skipped part *is* identical for each node in the subtrie below the skipped
 * bit! trie_insert() in this implementation takes care of that.
 *
 * if n is an internal node - a 'tnode' here, the various parts of its key
 * have many different meanings.
 *
 * Example:
 * _________________________________________________________________
 * | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
 * -----------------------------------------------------------------
 *  31  30  29  28  27  26  25  24  23  22  21  20  19  18  17  16
 *
 * _________________________________________________________________
 * | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
 * -----------------------------------------------------------------
 *  15  14  13  12  11  10   9   8   7   6   5   4   3   2   1   0
 *
 * tp->pos = 22
 * tp->bits = 3
 * n->pos = 13
 * n->bits = 4
 *
 * First, let's just ignore the bits that come before the parent tp, that is
 * the bits from (tp->pos + tp->bits) to 31. They are *known* but at this
 * point we do not use them for anything.
 *
 * The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
 * index into the parent's child array. That is, they will be used to find
 * 'n' among tp's children.
 *
 * The bits from (n->pos + n->bits) to (tp->pos - 1) - "S" - are skipped bits
 * for the node n.
 *
 * All the bits we have seen so far are significant to the node n. The rest
 * of the bits are really not needed or indeed known in n->key.
 *
 * The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
 * n's child array, and will of course be different for each child.
 *
 * The rest of the bits, from 0 to (n->pos - 1) - "u" - are completely unknown
 * at this point.
 */
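/* A worked example of the above using get_index(): get_index(key, tp)
 * computes (key ^ tp->key) >> tp->pos, i.e. bits 24-22 of the key (the "N"
 * bits), which is n's slot in tp's child array (tp->bits == 3 gives 8
 * slots).  Had the key disagreed with tp->key anywhere at or above bit
 * (tp->pos + tp->bits), the result would be >= (1ul << tp->bits), which is
 * how the lookup code detects that a key does not belong under a node.
 */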

static const int halve_threshold = 25;
static const int inflate_threshold = 50;
static const int halve_threshold_root = 15;
static const int inflate_threshold_root = 30;

static void __alias_free_mem(struct rcu_head *head)
{
	struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
	kmem_cache_free(fn_alias_kmem, fa);
}

static inline void alias_free_mem_rcu(struct fib_alias *fa)
{
	call_rcu(&fa->rcu, __alias_free_mem);
}

#define TNODE_KMALLOC_MAX \
	ilog2((PAGE_SIZE - TNODE_SIZE(0)) / sizeof(struct key_vector *))
#define TNODE_VMALLOC_MAX \
	ilog2((SIZE_MAX - TNODE_SIZE(0)) / sizeof(struct key_vector *))

static void __node_free_rcu(struct rcu_head *head)
{
	struct key_vector *n = container_of(head, struct key_vector, rcu);

	if (IS_LEAF(n))
		kmem_cache_free(trie_leaf_kmem, n);
	else if (n->bits <= TNODE_KMALLOC_MAX)
		kfree(n);
	else
		vfree(n);
}

#define node_free(n) call_rcu(&n->rcu, __node_free_rcu)

static struct tnode *tnode_alloc(int bits)
{
	size_t size;

	/* verify bits is within bounds */
	if (bits > TNODE_VMALLOC_MAX)
		return NULL;

	/* determine size and verify it is non-zero and didn't overflow */
	size = TNODE_SIZE(1ul << bits);

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

static inline void empty_child_inc(struct key_vector *n)
{
	++n->empty_children ? : ++n->full_children;
}

static inline void empty_child_dec(struct key_vector *n)
{
	n->empty_children-- ? : n->full_children--;
}
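/* empty_children is only KEYLENGTH bits wide, so a node with
 * bits == KEYLENGTH cannot hold its 2^KEYLENGTH empty slots in the counter
 * directly.  The ?: forms above let full_children act as the carry/borrow
 * bit: an increment that wraps empty_children to zero bumps full_children,
 * and a decrement from zero borrows from it.  tnode_new() seeds
 * full_children = 1 instead of empty_children = 1ul << bits in that case.
 */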

static struct key_vector *leaf_new(t_key key, struct fib_alias *fa)
{
	struct tnode *kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
	struct key_vector *l = kv->kv;

	if (!kv)
		return NULL;

	/* initialize key vector */
	l->key = key;
	l->pos = 0;
	l->bits = 0;
	l->slen = fa->fa_slen;

	/* link leaf to fib alias */
	INIT_HLIST_HEAD(&l->leaf);
	hlist_add_head(&fa->fa_list, &l->leaf);

	return l;
}

static struct key_vector *tnode_new(t_key key, int pos, int bits)
{
	struct tnode *tnode = tnode_alloc(bits);
	unsigned int shift = pos + bits;
	struct key_vector *tn = tnode->kv;

	/* verify bits and pos are valid and their msb bits are clear */
	BUG_ON(!bits || (shift > KEYLENGTH));

	pr_debug("AT %p s=%zu %zu\n", tnode, TNODE_SIZE(0),
		 sizeof(struct key_vector *) << bits);

	if (!tnode)
		return NULL;

	if (bits == KEYLENGTH)
		tn->full_children = 1;
	else
		tn->empty_children = 1ul << bits;

	tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
	tn->pos = pos;
	tn->bits = bits;
	tn->slen = pos;

	return tn;
}

/* Check whether a tnode 'n' is "full", i.e. it is an internal node
 * and no bits are skipped. See discussion in dyntree paper p. 6
 */
static inline int tnode_full(struct key_vector *tn, struct key_vector *n)
{
	return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n);
}

/* Add a child at position i overwriting the old value.
 * Update the value of full_children and empty_children.
 */
static void put_child(struct key_vector *tn, unsigned long i,
		      struct key_vector *n)
{
	struct key_vector *chi = get_child(tn, i);
	int isfull, wasfull;

	BUG_ON(i >= child_length(tn));

	/* update emptyChildren, overflow into fullChildren */
	if (n == NULL && chi != NULL)
		empty_child_inc(tn);
	if (n != NULL && chi == NULL)
		empty_child_dec(tn);

	/* update fullChildren */
	wasfull = tnode_full(tn, chi);
	isfull = tnode_full(tn, n);

	if (wasfull && !isfull)
		tn->full_children--;
	else if (!wasfull && isfull)
		tn->full_children++;

	if (n && (tn->slen < n->slen))
		tn->slen = n->slen;

	rcu_assign_pointer(tn->tnode[i], n);
}

static void update_children(struct key_vector *tn)
{
	unsigned long i;

	/* update all of the child parent pointers */
	for (i = child_length(tn); i;) {
		struct key_vector *inode = get_child(tn, --i);

		if (!inode)
			continue;

		/* Either update the children of a tnode that
		 * already belongs to us or update the child
		 * to point to ourselves.
		 */
		if (node_parent(inode) == tn)
			update_children(inode);
		else
			node_set_parent(inode, tn);
	}
}

static inline void put_child_root(struct key_vector *tp, struct trie *t,
				  t_key key, struct key_vector *n)
{
	if (tp)
		put_child(tp, get_index(key, tp), n);
	else
		rcu_assign_pointer(t->tnode[0], n);
}

static inline void tnode_free_init(struct key_vector *tn)
{
	tn->rcu.next = NULL;
}

static inline void tnode_free_append(struct key_vector *tn,
				     struct key_vector *n)
{
	n->rcu.next = tn->rcu.next;
	tn->rcu.next = &n->rcu;
}

static void tnode_free(struct key_vector *tn)
{
	struct callback_head *head = &tn->rcu;

	while (head) {
		head = head->next;
		tnode_free_size += TNODE_SIZE(1ul << tn->bits);
		node_free(tn);

		tn = container_of(head, struct key_vector, rcu);
	}

	if (tnode_free_size >= PAGE_SIZE * sync_pages) {
		tnode_free_size = 0;
		synchronize_rcu();
	}
}

static struct key_vector __rcu **replace(struct trie *t,
					 struct key_vector *oldtnode,
					 struct key_vector *tn)
{
	struct key_vector *tp = node_parent(oldtnode);
	struct key_vector __rcu **cptr;
	unsigned long i;

	/* setup the parent pointer out of and back into this node */
	NODE_INIT_PARENT(tn, tp);
	put_child_root(tp, t, tn->key, tn);

	/* update all of the child parent pointers */
	update_children(tn);

	/* all pointers should be clean so we are done */
	tnode_free(oldtnode);

	/* record the pointer that is pointing to this node */
	cptr = tp ? tp->tnode : t->tnode;

	/* resize children now that oldtnode is freed */
	for (i = child_length(tn); i;) {
		struct key_vector *inode = get_child(tn, --i);

		/* resize child node */
		if (tnode_full(tn, inode))
			resize(t, inode);
	}

	return cptr;
}

static struct key_vector __rcu **inflate(struct trie *t,
					 struct key_vector *oldtnode)
{
	struct key_vector *tn;
	unsigned long i;
	t_key m;

	pr_debug("In inflate\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos - 1, oldtnode->bits + 1);
	if (!tn)
		goto notnode;

	/* prepare oldtnode to be freed */
	tnode_free_init(oldtnode);

	/* Assemble all of the pointers in our cluster, in this case that
	 * represents all of the pointers out of our allocated nodes that
	 * point to existing tnodes and the links between our allocated
	 * nodes.
	 */
	for (i = child_length(oldtnode), m = 1u << tn->pos; i;) {
		struct key_vector *inode = get_child(oldtnode, --i);
		struct key_vector *node0, *node1;
		unsigned long j, k;

		/* An empty child */
		if (inode == NULL)
			continue;

		/* A leaf or an internal node with skipped bits */
		if (!tnode_full(oldtnode, inode)) {
			put_child(tn, get_index(inode->key, tn), inode);
			continue;
		}

		/* drop the node in the old tnode free list */
		tnode_free_append(oldtnode, inode);

		/* An internal node with two children */
		if (inode->bits == 1) {
			put_child(tn, 2 * i + 1, get_child(inode, 1));
			put_child(tn, 2 * i, get_child(inode, 0));
			continue;
		}

		/* We will replace this node 'inode' with two new
		 * ones, 'node0' and 'node1', each with half of the
		 * original children. The two new nodes will have
		 * a position one bit further down the key and this
		 * means that the "significant" part of their keys
		 * (see the discussion near the top of this file)
		 * will differ by one bit, which will be "0" in
		 * node0's key and "1" in node1's key. Since we are
		 * moving the key position by one step, the bit that
		 * we are moving away from - the bit at position
		 * (tn->pos) - is the one that will differ between
		 * node0 and node1. So... we synthesize that bit in the
		 * two new keys.
		 */
		node1 = tnode_new(inode->key | m, inode->pos, inode->bits - 1);
		if (!node1)
			goto nomem;
		node0 = tnode_new(inode->key, inode->pos, inode->bits - 1);

		tnode_free_append(tn, node1);
		if (!node0)
			goto nomem;
		tnode_free_append(tn, node0);

		/* populate child pointers in new nodes */
		for (k = child_length(inode), j = k / 2; j;) {
			put_child(node1, --j, get_child(inode, --k));
			put_child(node0, j, get_child(inode, j));
			put_child(node1, --j, get_child(inode, --k));
			put_child(node0, j, get_child(inode, j));
		}

		/* link new nodes to parent */
		NODE_INIT_PARENT(node1, tn);
		NODE_INIT_PARENT(node0, tn);

		/* link parent to nodes */
		put_child(tn, 2 * i + 1, node1);
		put_child(tn, 2 * i, node0);
	}

	/* setup the parent pointers into and out of this node */
	return replace(t, oldtnode, tn);
nomem:
	/* all pointers should be clean so we are done */
	tnode_free(tn);
notnode:
	return NULL;
}

static struct key_vector __rcu **halve(struct trie *t,
				       struct key_vector *oldtnode)
{
	struct key_vector *tn;
	unsigned long i;

	pr_debug("In halve\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos + 1, oldtnode->bits - 1);
	if (!tn)
		goto notnode;

	/* prepare oldtnode to be freed */
	tnode_free_init(oldtnode);

	/* Assemble all of the pointers in our cluster, in this case that
	 * represents all of the pointers out of our allocated nodes that
	 * point to existing tnodes and the links between our allocated
	 * nodes.
	 */
	for (i = child_length(oldtnode); i;) {
		struct key_vector *node1 = get_child(oldtnode, --i);
		struct key_vector *node0 = get_child(oldtnode, --i);
		struct key_vector *inode;

		/* At least one of the children is empty */
		if (!node1 || !node0) {
			put_child(tn, i / 2, node1 ? : node0);
			continue;
		}

		/* Two nonempty children */
		inode = tnode_new(node0->key, oldtnode->pos, 1);
		if (!inode)
			goto nomem;
		tnode_free_append(tn, inode);

		/* initialize pointers out of node */
		put_child(inode, 1, node1);
		put_child(inode, 0, node0);
		NODE_INIT_PARENT(inode, tn);

		/* link parent to node */
		put_child(tn, i / 2, inode);
	}

	/* setup the parent pointers into and out of this node */
	return replace(t, oldtnode, tn);
nomem:
	/* all pointers should be clean so we are done */
	tnode_free(tn);
notnode:
	return NULL;
}

static void collapse(struct trie *t, struct key_vector *oldtnode)
{
	struct key_vector *n, *tp;
	unsigned long i;

	/* scan the tnode looking for that one child that might still exist */
	for (n = NULL, i = child_length(oldtnode); !n && i;)
		n = get_child(oldtnode, --i);

	/* compress one level */
	tp = node_parent(oldtnode);
	put_child_root(tp, t, oldtnode->key, n);
	node_set_parent(n, tp);

	/* drop dead node */
	node_free(oldtnode);
}

static unsigned char update_suffix(struct key_vector *tn)
{
	unsigned char slen = tn->pos;
	unsigned long stride, i;

	/* search through the list of children looking for nodes that might
	 * have a suffix greater than the one we currently have.  This is
	 * why we start with a stride of 2 since a stride of 1 would
	 * represent the nodes with suffix length equal to tn->pos
	 */
	for (i = 0, stride = 0x2ul ; i < child_length(tn); i += stride) {
		struct key_vector *n = get_child(tn, i);

		if (!n || (n->slen <= slen))
			continue;

		/* update stride and slen based on new value */
		stride <<= (n->slen - slen);
		slen = n->slen;
		i &= ~(stride - 1);

		/* if slen covers all but the last bit we can stop here
		 * there will be nothing longer than that since only node
		 * 0 and 1 << (bits - 1) could have that as their suffix
		 * length.
		 */
		if ((slen + 1) >= (tn->pos + tn->bits))
			break;
	}

	tn->slen = slen;

	return slen;
}

/* From "Implementing a dynamic compressed trie" by Stefan Nilsson of
 * the Helsinki University of Technology and Matti Tikkanen of Nokia
 * Telecommunications, page 6:
 * "A node is doubled if the ratio of non-empty children to all
 * children in the *doubled* node is at least 'high'."
 *
 * 'high' in this instance is the variable 'inflate_threshold'. It
 * is expressed as a percentage, so we multiply it with
 * child_length() and instead of multiplying by 2 (since the
 * child array will be doubled by inflate()) and multiplying
 * the left-hand side by 100 (to handle the percentage thing) we
 * multiply the left-hand side by 50.
 *
 * The left-hand side may look a bit weird: child_length(tn)
 * - tn->empty_children is of course the number of non-null children
 * in the current node. tn->full_children is the number of "full"
 * children, that is non-null tnodes with a skip value of 0.
 * All of those will be doubled in the resulting inflated tnode, so
 * we just count them one extra time here.
 *
 * A clearer way to write this would be:
 *
 * to_be_doubled = tn->full_children;
 * not_to_be_doubled = child_length(tn) - tn->empty_children -
 *     tn->full_children;
 *
 * new_child_length = child_length(tn) * 2;
 *
 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
 *      new_child_length;
 * if (new_fill_factor >= inflate_threshold)
 *
 * ...and so on, tho it would mess up the while () loop.
 *
 * anyway,
 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
 *      inflate_threshold
 *
 * avoid a division:
 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
 *      inflate_threshold * new_child_length
 *
 * expand not_to_be_doubled and to_be_doubled, and shorten:
 * 100 * (child_length(tn) - tn->empty_children +
 *    tn->full_children) >= inflate_threshold * new_child_length
 *
 * expand new_child_length:
 * 100 * (child_length(tn) - tn->empty_children +
 *    tn->full_children) >=
 *      inflate_threshold * child_length(tn) * 2
 *
 * shorten again:
 * 50 * (tn->full_children + child_length(tn) -
 *    tn->empty_children) >= inflate_threshold *
 *    child_length(tn)
 *
 */
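/* A short numeric sketch of the final inequality: for a node with
 * child_length(tn) == 16, empty_children == 4 and full_children == 2,
 * the left-hand side is 50 * (2 + 16 - 4) = 700 and the right-hand side
 * is inflate_threshold * 16 = 800 (with the non-root threshold of 50),
 * so the node is not inflated; two more full or non-empty children
 * would tip it over.
 */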
static inline bool should_inflate(struct key_vector *tp, struct key_vector *tn)
{
	unsigned long used = child_length(tn);
	unsigned long threshold = used;

	/* Keep root node larger */
	threshold *= tp ? inflate_threshold : inflate_threshold_root;
	used -= tn->empty_children;
	used += tn->full_children;

	/* if bits == KEYLENGTH then pos = 0, and will fail below */

	return (used > 1) && tn->pos && ((50 * used) >= threshold);
}

static inline bool should_halve(struct key_vector *tp, struct key_vector *tn)
{
	unsigned long used = child_length(tn);
	unsigned long threshold = used;

	/* Keep root node larger */
	threshold *= tp ? halve_threshold : halve_threshold_root;
	used -= tn->empty_children;

	/* if bits == KEYLENGTH then used = 100% on wrap, and will fail below */

	return (used > 1) && (tn->bits > 1) && ((100 * used) < threshold);
}

static inline bool should_collapse(struct key_vector *tn)
{
	unsigned long used = child_length(tn);

	used -= tn->empty_children;

	/* account for bits == KEYLENGTH case */
	if ((tn->bits == KEYLENGTH) && tn->full_children)
		used -= KEY_MAX;

	/* One child or none, time to drop us from the trie */
	return used < 2;
}

#define MAX_WORK 10
static struct key_vector __rcu **resize(struct trie *t,
					struct key_vector *tn)
{
#ifdef CONFIG_IP_FIB_TRIE_STATS
	struct trie_use_stats __percpu *stats = t->stats;
#endif
	struct key_vector *tp = node_parent(tn);
	unsigned long cindex = tp ? get_index(tn->key, tp) : 0;
	struct key_vector __rcu **cptr = tp ? tp->tnode : t->tnode;
	int max_work = MAX_WORK;

	pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
		 tn, inflate_threshold, halve_threshold);

	/* track the tnode via the pointer from the parent instead of
	 * doing it ourselves.  This way we can let RCU fully do its
	 * thing without us interfering
	 */
	BUG_ON(tn != rtnl_dereference(cptr[cindex]));

	/* Double as long as the resulting node has a number of
	 * nonempty nodes that are above the threshold.
	 */
	while (should_inflate(tp, tn) && max_work) {
		struct key_vector __rcu **tcptr = inflate(t, tn);

		if (!tcptr) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
			this_cpu_inc(stats->resize_node_skipped);
#endif
			break;
		}

		max_work--;
		cptr = tcptr;
		tn = rtnl_dereference(cptr[cindex]);
	}

	/* Return if at least one inflate is run */
	if (max_work != MAX_WORK)
		return cptr;

	/* Halve as long as the number of empty children in this
	 * node is above threshold.
	 */
	while (should_halve(tp, tn) && max_work) {
		struct key_vector __rcu **tcptr = halve(t, tn);

		if (!tcptr) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
			this_cpu_inc(stats->resize_node_skipped);
#endif
			break;
		}

		max_work--;
		cptr = tcptr;
		tn = rtnl_dereference(cptr[cindex]);
	}

	/* Only one child remains */
	if (should_collapse(tn)) {
		collapse(t, tn);
		return cptr;
	}

	/* Return if at least one deflate was run */
	if (max_work != MAX_WORK)
		return cptr;

	/* push the suffix length to the parent node */
	if (tn->slen > tn->pos) {
		unsigned char slen = update_suffix(tn);

		if (tp && (slen > tp->slen))
			tp->slen = slen;
	}

	return cptr;
}

static void leaf_pull_suffix(struct key_vector *tp, struct key_vector *l)
{
	while (tp && (tp->slen > tp->pos) && (tp->slen > l->slen)) {
		if (update_suffix(tp) > l->slen)
			break;
		tp = node_parent(tp);
	}
}

static void leaf_push_suffix(struct key_vector *tn, struct key_vector *l)
{
	/* if this is a new leaf then tn will be NULL and we can sort
	 * out parent suffix lengths as a part of trie_rebalance
	 */
	while (tn && (tn->slen < l->slen)) {
		tn->slen = l->slen;
		tn = node_parent(tn);
	}
}

/* rcu_read_lock needs to be held by the caller on the read side */
static struct key_vector *fib_find_node(struct trie *t,
					struct key_vector **tp, u32 key)
914
{
915
	struct key_vector *pn = NULL, *n = rcu_dereference_rtnl(t->tnode[0]);
A
Alexander Duyck 已提交
916 917 918 919 920 921 922 923 924

	while (n) {
		unsigned long index = get_index(key, n);

		/* This bit of code is a bit tricky but it combines multiple
		 * checks into a single check.  The prefix consists of the
		 * prefix plus zeros for the bits in the cindex. The index
		 * is the difference between the key and this value.  From
		 * this we can actually derive several pieces of data.
925
		 *   if (index >= (1ul << bits))
A
Alexander Duyck 已提交
926
		 *     we have a mismatch in skip bits and failed
927 928
		 *   else
		 *     we know the value is cindex
929 930 931 932
		 *
		 * This check is safe even if bits == KEYLENGTH due to the
		 * fact that we can only allocate a node with 32 bits if a
		 * long is greater than 32 bits.
A
Alexander Duyck 已提交
933
		 */
934 935 936 937
		if (index >= (1ul << n->bits)) {
			n = NULL;
			break;
		}
A
Alexander Duyck 已提交
938 939 940

		/* we have found a leaf. Prefixes have already been compared */
		if (IS_LEAF(n))
941 942
			break;

943
		pn = n;
944
		n = get_child_rcu(n, index);
A
Alexander Duyck 已提交
945
	}
O
Olof Johansson 已提交
946

947
	*tp = pn;
948

A
Alexander Duyck 已提交
949
	return n;
950 951
}

952 953 954
/* Return the first fib alias matching TOS with
 * priority less than or equal to PRIO.
 */
A
Alexander Duyck 已提交
955 956
static struct fib_alias *fib_find_alias(struct hlist_head *fah, u8 slen,
					u8 tos, u32 prio)
957 958 959 960 961 962
{
	struct fib_alias *fa;

	if (!fah)
		return NULL;

963
	hlist_for_each_entry(fa, fah, fa_list) {
A
Alexander Duyck 已提交
964 965 966 967
		if (fa->fa_slen < slen)
			continue;
		if (fa->fa_slen != slen)
			break;
968 969 970 971 972 973 974 975 976
		if (fa->fa_tos > tos)
			continue;
		if (fa->fa_info->fib_priority >= prio || fa->fa_tos < tos)
			return fa;
	}

	return NULL;
}

977
static void trie_rebalance(struct trie *t, struct key_vector *tn)
978
{
979
	struct key_vector __rcu **cptr = t->tnode;
980

981
	while (tn) {
982
		struct key_vector *tp = node_parent(tn);
983 984 985 986

		cptr = resize(t, tn);
		if (!tp)
			break;
987
		tn = container_of(cptr, struct key_vector, tnode[0]);
988 989 990
	}
}

991
static int fib_insert_node(struct trie *t, struct key_vector *tp,
992
			   struct fib_alias *new, t_key key)
993
{
994
	struct key_vector *n, *l;
995

996
	l = leaf_new(key, new);
A
Alexander Duyck 已提交
997
	if (!l)
998
		goto noleaf;
999 1000 1001

	/* retrieve child from parent node */
	if (tp)
1002
		n = get_child(tp, get_index(key, tp));
1003
	else
1004
		n = rcu_dereference_rtnl(t->tnode[0]);
1005

1006 1007 1008 1009 1010 1011 1012
	/* Case 2: n is a LEAF or a TNODE and the key doesn't match.
	 *
	 *  Add a new tnode here
	 *  first tnode need some special handling
	 *  leaves us in position for handling as case 3
	 */
	if (n) {
1013
		struct key_vector *tn;
1014

1015
		tn = tnode_new(key, __fls(key ^ n->key), 1);
1016 1017
		if (!tn)
			goto notnode;
O
Olof Johansson 已提交
1018

1019 1020 1021
		/* initialize routes out of node */
		NODE_INIT_PARENT(tn, tp);
		put_child(tn, get_index(key, tn) ^ 1, n);
1022

1023 1024 1025
		/* start adding routes into the node */
		put_child_root(tp, t, key, tn);
		node_set_parent(n, tn);
1026

1027
		/* parent now has a NULL spot where the leaf can go */
1028
		tp = tn;
1029
	}
O
Olof Johansson 已提交
1030

1031
	/* Case 3: n is NULL, and will just insert a new leaf */
1032 1033 1034 1035 1036
	NODE_INIT_PARENT(l, tp);
	put_child_root(tp, t, key, l);
	trie_rebalance(t, tp);

	return 0;
1037 1038 1039 1040
notnode:
	node_free(l);
noleaf:
	return -ENOMEM;
1041 1042
}

1043 1044
static int fib_insert_alias(struct trie *t, struct key_vector *tp,
			    struct key_vector *l, struct fib_alias *new,
1045 1046 1047 1048 1049 1050 1051
			    struct fib_alias *fa, t_key key)
{
	if (!l)
		return fib_insert_node(t, tp, new, key);

	if (fa) {
		hlist_add_before_rcu(&new->fa_list, &fa->fa_list);
1052
	} else {
1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064
		struct fib_alias *last;

		hlist_for_each_entry(last, &l->leaf, fa_list) {
			if (new->fa_slen < last->fa_slen)
				break;
			fa = last;
		}

		if (fa)
			hlist_add_behind_rcu(&new->fa_list, &fa->fa_list);
		else
			hlist_add_head_rcu(&new->fa_list, &l->leaf);
1065
	}
R
Robert Olsson 已提交
1066

1067 1068 1069 1070 1071 1072 1073
	/* if we added to the tail node then we need to update slen */
	if (l->slen < new->fa_slen) {
		l->slen = new->fa_slen;
		leaf_push_suffix(tp, l);
	}

	return 0;
1074 1075
}

1076
/* Caller must hold RTNL. */
1077
int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
1078
{
1079
	struct trie *t = (struct trie *)tb->tb_data;
1080
	struct fib_alias *fa, *new_fa;
1081
	struct key_vector *l, *tp;
1082
	struct fib_info *fi;
A
Alexander Duyck 已提交
1083 1084
	u8 plen = cfg->fc_dst_len;
	u8 slen = KEYLENGTH - plen;
1085
	u8 tos = cfg->fc_tos;
1086
	u32 key;
1087 1088
	int err;

1089
	if (plen > KEYLENGTH)
1090 1091
		return -EINVAL;

1092
	key = ntohl(cfg->fc_dst);
1093

1094
	pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
1095

1096
	if ((plen < KEYLENGTH) && (key << plen))
1097 1098
		return -EINVAL;

1099 1100 1101
	fi = fib_create_info(cfg);
	if (IS_ERR(fi)) {
		err = PTR_ERR(fi);
1102
		goto err;
1103
	}
1104

1105
	l = fib_find_node(t, &tp, key);
A
Alexander Duyck 已提交
1106
	fa = l ? fib_find_alias(&l->leaf, slen, tos, fi->fib_priority) : NULL;
1107 1108 1109 1110 1111 1112

	/* Now fa, if non-NULL, points to the first fib alias
	 * with the same keys [prefix,tos,priority], if such key already
	 * exists or to the node before which we will insert new one.
	 *
	 * If fa is NULL, we will need to allocate a new one and
1113 1114
	 * insert to the tail of the section matching the suffix length
	 * of the new alias.
1115 1116
	 */

1117 1118 1119
	if (fa && fa->fa_tos == tos &&
	    fa->fa_info->fib_priority == fi->fib_priority) {
		struct fib_alias *fa_first, *fa_match;
1120 1121

		err = -EEXIST;
1122
		if (cfg->fc_nlflags & NLM_F_EXCL)
1123 1124
			goto out;

1125 1126 1127 1128 1129 1130 1131
		/* We have 2 goals:
		 * 1. Find exact match for type, scope, fib_info to avoid
		 * duplicate routes
		 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
		 */
		fa_match = NULL;
		fa_first = fa;
1132
		hlist_for_each_entry_from(fa, fa_list) {
A
Alexander Duyck 已提交
1133
			if ((fa->fa_slen != slen) || (fa->fa_tos != tos))
1134 1135 1136 1137 1138 1139 1140 1141 1142 1143
				break;
			if (fa->fa_info->fib_priority != fi->fib_priority)
				break;
			if (fa->fa_type == cfg->fc_type &&
			    fa->fa_info == fi) {
				fa_match = fa;
				break;
			}
		}

1144
		if (cfg->fc_nlflags & NLM_F_REPLACE) {
1145 1146 1147
			struct fib_info *fi_drop;
			u8 state;

1148 1149 1150 1151
			fa = fa_first;
			if (fa_match) {
				if (fa == fa_match)
					err = 0;
1152
				goto out;
1153
			}
R
Robert Olsson 已提交
1154
			err = -ENOBUFS;
1155
			new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
R
Robert Olsson 已提交
1156 1157
			if (new_fa == NULL)
				goto out;
1158 1159

			fi_drop = fa->fa_info;
R
Robert Olsson 已提交
1160 1161
			new_fa->fa_tos = fa->fa_tos;
			new_fa->fa_info = fi;
1162
			new_fa->fa_type = cfg->fc_type;
1163
			state = fa->fa_state;
1164
			new_fa->fa_state = state & ~FA_S_ACCESSED;
1165
			new_fa->fa_slen = fa->fa_slen;
1166

1167 1168 1169 1170 1171 1172 1173 1174 1175 1176
			err = netdev_switch_fib_ipv4_add(key, plen, fi,
							 new_fa->fa_tos,
							 cfg->fc_type,
							 tb->tb_id);
			if (err) {
				netdev_switch_fib_ipv4_abort(fi);
				kmem_cache_free(fn_alias_kmem, new_fa);
				goto out;
			}

1177
			hlist_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1178

R
Robert Olsson 已提交
1179
			alias_free_mem_rcu(fa);
1180 1181 1182

			fib_release_info(fi_drop);
			if (state & FA_S_ACCESSED)
1183
				rt_cache_flush(cfg->fc_nlinfo.nl_net);
1184 1185
			rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
				tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
1186

O
Olof Johansson 已提交
1187
			goto succeeded;
1188 1189 1190 1191 1192
		}
		/* Error if we find a perfect match which
		 * uses the same scope, type, and nexthop
		 * information.
		 */
1193 1194
		if (fa_match)
			goto out;
1195

1196
		if (!(cfg->fc_nlflags & NLM_F_APPEND))
1197
			fa = fa_first;
1198 1199
	}
	err = -ENOENT;
1200
	if (!(cfg->fc_nlflags & NLM_F_CREATE))
1201 1202 1203
		goto out;

	err = -ENOBUFS;
1204
	new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1205 1206 1207 1208 1209
	if (new_fa == NULL)
		goto out;

	new_fa->fa_info = fi;
	new_fa->fa_tos = tos;
1210
	new_fa->fa_type = cfg->fc_type;
1211
	new_fa->fa_state = 0;
A
Alexander Duyck 已提交
1212
	new_fa->fa_slen = slen;
1213

1214 1215 1216 1217 1218 1219 1220 1221
	/* (Optionally) offload fib entry to switch hardware. */
	err = netdev_switch_fib_ipv4_add(key, plen, fi, tos,
					 cfg->fc_type, tb->tb_id);
	if (err) {
		netdev_switch_fib_ipv4_abort(fi);
		goto out_free_new_fa;
	}

1222
	/* Insert new entry to the list. */
1223 1224
	err = fib_insert_alias(t, tp, l, new_fa, fa, key);
	if (err)
1225
		goto out_sw_fib_del;
1226

1227 1228 1229
	if (!plen)
		tb->tb_num_default++;

1230
	rt_cache_flush(cfg->fc_nlinfo.nl_net);
1231
	rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
1232
		  &cfg->fc_nlinfo, 0);
1233 1234
succeeded:
	return 0;
1235

1236 1237
out_sw_fib_del:
	netdev_switch_fib_ipv4_del(key, plen, fi, tos, cfg->fc_type, tb->tb_id);
1238 1239
out_free_new_fa:
	kmem_cache_free(fn_alias_kmem, new_fa);
1240 1241
out:
	fib_release_info(fi);
O
Olof Johansson 已提交
1242
err:
1243 1244 1245
	return err;
}

1246
static inline t_key prefix_mismatch(t_key key, struct key_vector *n)
1247 1248 1249 1250 1251 1252
{
	t_key prefix = n->key;

	return (key ^ prefix) & (prefix | -prefix);
}

1253
/* should be called with rcu_read_lock */
1254
int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
E
Eric Dumazet 已提交
1255
		     struct fib_result *res, int fib_flags)
1256
{
1257
	struct trie *t = (struct trie *)tb->tb_data;
1258 1259 1260
#ifdef CONFIG_IP_FIB_TRIE_STATS
	struct trie_use_stats __percpu *stats = t->stats;
#endif
1261
	const t_key key = ntohl(flp->daddr);
1262
	struct key_vector *n, *pn;
A
Alexander Duyck 已提交
1263
	struct fib_alias *fa;
1264
	unsigned long index;
1265
	t_key cindex;
O
Olof Johansson 已提交
1266

1267
	n = rcu_dereference(t->tnode[0]);
1268
	if (!n)
1269
		return -EAGAIN;
1270 1271

#ifdef CONFIG_IP_FIB_TRIE_STATS
1272
	this_cpu_inc(stats->gets);
1273 1274
#endif

A
Alexander Duyck 已提交
1275
	pn = n;
1276 1277 1278 1279
	cindex = 0;

	/* Step 1: Travel to the longest prefix match in the trie */
	for (;;) {
1280
		index = get_index(key, n);
1281 1282 1283 1284 1285 1286

		/* This bit of code is a bit tricky but it combines multiple
		 * checks into a single check.  The prefix consists of the
		 * prefix plus zeros for the "bits" in the prefix. The index
		 * is the difference between the key and this value.  From
		 * this we can actually derive several pieces of data.
1287
		 *   if (index >= (1ul << bits))
1288
		 *     we have a mismatch in skip bits and failed
1289 1290
		 *   else
		 *     we know the value is cindex
1291 1292 1293 1294
		 *
		 * This check is safe even if bits == KEYLENGTH due to the
		 * fact that we can only allocate a node with 32 bits if a
		 * long is greater than 32 bits.
1295
		 */
1296
		if (index >= (1ul << n->bits))
1297
			break;
1298

1299 1300
		/* we have found a leaf. Prefixes have already been compared */
		if (IS_LEAF(n))
1301
			goto found;
1302

1303 1304
		/* only record pn and cindex if we are going to be chopping
		 * bits later.  Otherwise we are just wasting cycles.
O
Olof Johansson 已提交
1305
		 */
1306
		if (n->slen > n->pos) {
1307 1308
			pn = n;
			cindex = index;
O
Olof Johansson 已提交
1309
		}
1310

1311
		n = get_child_rcu(n, index);
1312 1313 1314
		if (unlikely(!n))
			goto backtrace;
	}
1315

1316 1317 1318
	/* Step 2: Sort out leaves and begin backtracing for longest prefix */
	for (;;) {
		/* record the pointer where our next node pointer is stored */
1319
		struct key_vector __rcu **cptr = n->tnode;
1320

1321 1322 1323
		/* This test verifies that none of the bits that differ
		 * between the key and the prefix exist in the region of
		 * the lsb and higher in the prefix.
O
Olof Johansson 已提交
1324
		 */
1325
		if (unlikely(prefix_mismatch(key, n)) || (n->slen == n->pos))
1326
			goto backtrace;
O
Olof Johansson 已提交
1327

1328 1329 1330
		/* exit out and process leaf */
		if (unlikely(IS_LEAF(n)))
			break;
O
Olof Johansson 已提交
1331

1332 1333 1334
		/* Don't bother recording parent info.  Since we are in
		 * prefix match mode we will have to come back to wherever
		 * we started this traversal anyway
O
Olof Johansson 已提交
1335 1336
		 */

1337
		while ((n = rcu_dereference(*cptr)) == NULL) {
1338 1339
backtrace:
#ifdef CONFIG_IP_FIB_TRIE_STATS
1340 1341
			if (!n)
				this_cpu_inc(stats->null_node_hit);
1342
#endif
1343 1344 1345 1346 1347 1348 1349 1350 1351 1352
			/* If we are at cindex 0 there are no more bits for
			 * us to strip at this level so we must ascend back
			 * up one level to see if there are any more bits to
			 * be stripped there.
			 */
			while (!cindex) {
				t_key pkey = pn->key;

				pn = node_parent_rcu(pn);
				if (unlikely(!pn))
1353
					return -EAGAIN;
1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364
#ifdef CONFIG_IP_FIB_TRIE_STATS
				this_cpu_inc(stats->backtrack);
#endif
				/* Get Child's index */
				cindex = get_index(pkey, pn);
			}

			/* strip the least significant bit from the cindex */
			cindex &= cindex - 1;

			/* grab pointer for next child node */
1365
			cptr = &pn->tnode[cindex];
1366
		}
1367
	}
1368

1369
found:
1370 1371 1372
	/* this line carries forward the xor from earlier in the function */
	index = key ^ n->key;

1373
	/* Step 3: Process the leaf, if that fails fall back to backtracing */
A
Alexander Duyck 已提交
1374 1375 1376
	hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) {
		struct fib_info *fi = fa->fa_info;
		int nhsel, err;
1377

1378
		if ((index >= (1ul << fa->fa_slen)) &&
A
Alexander Duyck 已提交
1379
		    ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen != KEYLENGTH)))
1380
			continue;
A
Alexander Duyck 已提交
1381 1382 1383 1384 1385 1386 1387 1388 1389
		if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
			continue;
		if (fi->fib_dead)
			continue;
		if (fa->fa_info->fib_scope < flp->flowi4_scope)
			continue;
		fib_alias_accessed(fa);
		err = fib_props[fa->fa_type].error;
		if (unlikely(err < 0)) {
1390
#ifdef CONFIG_IP_FIB_TRIE_STATS
A
Alexander Duyck 已提交
1391
			this_cpu_inc(stats->semantic_match_passed);
1392
#endif
A
Alexander Duyck 已提交
1393 1394 1395 1396 1397 1398 1399 1400 1401 1402
			return err;
		}
		if (fi->fib_flags & RTNH_F_DEAD)
			continue;
		for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
			const struct fib_nh *nh = &fi->fib_nh[nhsel];

			if (nh->nh_flags & RTNH_F_DEAD)
				continue;
			if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
1403
				continue;
A
Alexander Duyck 已提交
1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414

			if (!(fib_flags & FIB_LOOKUP_NOREF))
				atomic_inc(&fi->fib_clntref);

			res->prefixlen = KEYLENGTH - fa->fa_slen;
			res->nh_sel = nhsel;
			res->type = fa->fa_type;
			res->scope = fi->fib_scope;
			res->fi = fi;
			res->table = tb;
			res->fa_head = &n->leaf;
1415
#ifdef CONFIG_IP_FIB_TRIE_STATS
A
Alexander Duyck 已提交
1416
			this_cpu_inc(stats->semantic_match_passed);
1417
#endif
A
Alexander Duyck 已提交
1418
			return err;
1419
		}
1420
	}
1421
#ifdef CONFIG_IP_FIB_TRIE_STATS
1422
	this_cpu_inc(stats->semantic_match_miss);
1423 1424
#endif
	goto backtrace;
1425
}
1426
EXPORT_SYMBOL_GPL(fib_table_lookup);
1427

1428 1429
static void fib_remove_alias(struct trie *t, struct key_vector *tp,
			     struct key_vector *l, struct fib_alias *old)
1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457
{
	/* record the location of the previous list_info entry */
	struct hlist_node **pprev = old->fa_list.pprev;
	struct fib_alias *fa = hlist_entry(pprev, typeof(*fa), fa_list.next);

	/* remove the fib_alias from the list */
	hlist_del_rcu(&old->fa_list);

	/* if we emptied the list this leaf will be freed and we can sort
	 * out parent suffix lengths as a part of trie_rebalance
	 */
	if (hlist_empty(&l->leaf)) {
		put_child_root(tp, t, l->key, NULL);
		node_free(l);
		trie_rebalance(t, tp);
		return;
	}

	/* only access fa if it is pointing at the last valid hlist_node */
	if (*pprev)
		return;

	/* update the trie with the latest suffix length */
	l->slen = fa->fa_slen;
	leaf_pull_suffix(tp, l);
}

/* Caller must hold RTNL. */
1458
int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
1459 1460 1461
{
	struct trie *t = (struct trie *) tb->tb_data;
	struct fib_alias *fa, *fa_to_delete;
1462
	struct key_vector *l, *tp;
A
Alexander Duyck 已提交
1463 1464
	u8 plen = cfg->fc_dst_len;
	u8 slen = KEYLENGTH - plen;
1465 1466
	u8 tos = cfg->fc_tos;
	u32 key;
O
Olof Johansson 已提交
1467

A
Alexander Duyck 已提交
1468
	if (plen > KEYLENGTH)
1469 1470
		return -EINVAL;

1471
	key = ntohl(cfg->fc_dst);
1472

1473
	if ((plen < KEYLENGTH) && (key << plen))
1474 1475
		return -EINVAL;

1476
	l = fib_find_node(t, &tp, key);
1477
	if (!l)
1478 1479
		return -ESRCH;

A
Alexander Duyck 已提交
1480
	fa = fib_find_alias(&l->leaf, slen, tos, 0);
1481 1482 1483
	if (!fa)
		return -ESRCH;

S
Stephen Hemminger 已提交
1484
	pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
1485 1486

	fa_to_delete = NULL;
1487
	hlist_for_each_entry_from(fa, fa_list) {
1488 1489
		struct fib_info *fi = fa->fa_info;

A
Alexander Duyck 已提交
1490
		if ((fa->fa_slen != slen) || (fa->fa_tos != tos))
1491 1492
			break;

1493 1494
		if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
		    (cfg->fc_scope == RT_SCOPE_NOWHERE ||
1495
		     fa->fa_info->fib_scope == cfg->fc_scope) &&
1496 1497
		    (!cfg->fc_prefsrc ||
		     fi->fib_prefsrc == cfg->fc_prefsrc) &&
1498 1499 1500
		    (!cfg->fc_protocol ||
		     fi->fib_protocol == cfg->fc_protocol) &&
		    fib_nh_match(cfg, fi) == 0) {
1501 1502 1503 1504 1505
			fa_to_delete = fa;
			break;
		}
	}

O
Olof Johansson 已提交
1506 1507
	if (!fa_to_delete)
		return -ESRCH;
1508

1509 1510 1511
	netdev_switch_fib_ipv4_del(key, plen, fa_to_delete->fa_info, tos,
				   cfg->fc_type, tb->tb_id);

1512
	rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id,
1513
		  &cfg->fc_nlinfo, 0);
O
Olof Johansson 已提交
1514

1515 1516 1517
	if (!plen)
		tb->tb_num_default--;

1518
	fib_remove_alias(t, tp, l, fa_to_delete);
1519

1520
	if (fa_to_delete->fa_state & FA_S_ACCESSED)
1521
		rt_cache_flush(cfg->fc_nlinfo.nl_net);
1522

1523 1524
	fib_release_info(fa_to_delete->fa_info);
	alias_free_mem_rcu(fa_to_delete);
O
Olof Johansson 已提交
1525
	return 0;
1526 1527
}

1528
/* Scan for the next leaf starting at the provided key value */
1529
static struct key_vector *leaf_walk_rcu(struct key_vector **tn, t_key key)
1530
{
1531
	struct key_vector *pn, *n = *tn;
1532
	unsigned long cindex;
1533

1534 1535 1536
	/* record parent node for backtracing */
	pn = n;
	cindex = n ? get_index(key, n) : 0;
1537

1538 1539 1540
	/* this loop is meant to try and find the key in the trie */
	while (n) {
		unsigned long idx = get_index(key, n);
1541

1542 1543 1544 1545 1546
		/* guarantee forward progress on the keys */
		if (IS_LEAF(n) && (n->key >= key))
			goto found;
		if (idx >= (1ul << n->bits))
			break;
1547

1548 1549 1550
		/* record parent and next child index */
		pn = n;
		cindex = idx;
1551

1552
		/* descend into the next child */
1553
		n = get_child_rcu(pn, cindex++);
1554
	}
1555

1556 1557 1558 1559 1560
	/* this loop will search for the next leaf with a greater key */
	while (pn) {
		/* if we exhausted the parent node we will need to climb */
		if (cindex >= (1ul << pn->bits)) {
			t_key pkey = pn->key;
1561

1562 1563 1564
			pn = node_parent_rcu(pn);
			if (!pn)
				break;
1565

1566 1567 1568
			cindex = get_index(pkey, pn) + 1;
			continue;
		}
1569

1570
		/* grab the next available node */
1571
		n = get_child_rcu(pn, cindex++);
1572 1573
		if (!n)
			continue;
1574

1575 1576 1577
		/* no need to compare keys since we bumped the index */
		if (IS_LEAF(n))
			goto found;
1578

		/* Rescan: start scanning in the new node */
		pn = n;
		cindex = 0;
	}
S
Stephen Hemminger 已提交
1583

1584 1585 1586 1587 1588 1589
	*tn = pn;
	return NULL; /* Root of trie */
found:
	/* if we are at the limit for keys just return NULL for the tnode */
	*tn = (n->key == KEY_MAX) ? NULL : pn;
	return n;
1590 1591
}

1592 1593 1594 1595 1596
/* Caller must hold RTNL */
void fib_table_flush_external(struct fib_table *tb)
{
	struct trie *t = (struct trie *)tb->tb_data;
	struct fib_alias *fa;
1597
	struct key_vector *n, *pn;
1598 1599
	unsigned long cindex;

1600
	n = rcu_dereference(t->tnode[0]);
1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617
	if (!n)
		return;

	pn = NULL;
	cindex = 0;

	while (IS_TNODE(n)) {
		/* record pn and cindex for leaf walking */
		pn = n;
		cindex = 1ul << n->bits;
backtrace:
		/* walk trie in reverse order */
		do {
			while (!(cindex--)) {
				t_key pkey = pn->key;

				/* if we got the root we are done */
1618
				pn = node_parent(pn);
1619 1620 1621 1622 1623 1624 1625
				if (!pn)
					return;

				cindex = get_index(pkey, pn);
			}

			/* grab the next available node */
1626
			n = get_child(pn, cindex);
1627 1628 1629 1630 1631 1632
		} while (!n);
	}

	hlist_for_each_entry(fa, &n->leaf, fa_list) {
		struct fib_info *fi = fa->fa_info;

1633 1634 1635 1636 1637 1638 1639
		if (!fi || !(fi->fib_flags & RTNH_F_EXTERNAL))
			continue;

		netdev_switch_fib_ipv4_del(n->key,
					   KEYLENGTH - fa->fa_slen,
					   fi, fa->fa_tos,
					   fa->fa_type, tb->tb_id);
1640 1641 1642 1643 1644 1645 1646
	}

	/* if trie is leaf only loop is completed */
	if (pn)
		goto backtrace;
}

1647
/* Caller must hold RTNL. */
1648
int fib_table_flush(struct fib_table *tb)
1649
{
1650
	struct trie *t = (struct trie *)tb->tb_data;
1651
	struct key_vector *n, *pn;
1652 1653 1654 1655
	struct hlist_node *tmp;
	struct fib_alias *fa;
	unsigned long cindex;
	unsigned char slen;
1656
	int found = 0;
1657

1658
	n = rcu_dereference(t->tnode[0]);
1659 1660
	if (!n)
		goto flush_complete;
1661

1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672
	pn = NULL;
	cindex = 0;

	while (IS_TNODE(n)) {
		/* record pn and cindex for leaf walking */
		pn = n;
		cindex = 1ul << n->bits;
backtrace:
		/* walk trie in reverse order */
		do {
			while (!(cindex--)) {
1673
				struct key_vector __rcu **cptr;
1674 1675 1676 1677 1678 1679
				t_key pkey = pn->key;

				n = pn;
				pn = node_parent(n);

				/* resize completed node */
1680
				cptr = resize(t, n);
1681 1682 1683 1684 1685

				/* if we got the root we are done */
				if (!pn)
					goto flush_complete;

1686 1687
				pn = container_of(cptr, struct key_vector,
						  tnode[0]);
1688 1689 1690 1691
				cindex = get_index(pkey, pn);
			}

			/* grab the next available node */
1692
			n = get_child(pn, cindex);
1693 1694 1695 1696 1697 1698 1699 1700 1701 1702
		} while (!n);
	}

	/* track slen in case any prefixes survive */
	slen = 0;

	hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
		struct fib_info *fi = fa->fa_info;

		if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1703 1704 1705 1706
			netdev_switch_fib_ipv4_del(n->key,
						   KEYLENGTH - fa->fa_slen,
						   fi, fa->fa_tos,
						   fa->fa_type, tb->tb_id);
1707 1708 1709 1710 1711 1712
			hlist_del_rcu(&fa->fa_list);
			fib_release_info(fa->fa_info);
			alias_free_mem_rcu(fa);
			found++;

			continue;
1713 1714
		}

1715
		slen = fa->fa_slen;
1716 1717
	}

1718 1719 1720 1721 1722 1723 1724
	/* update leaf slen */
	n->slen = slen;

	if (hlist_empty(&n->leaf)) {
		put_child_root(pn, t, n->key, NULL);
		node_free(n);
	} else {
1725
		leaf_pull_suffix(pn, n);
1726
	}
1727

1728 1729 1730 1731
	/* if trie is leaf only loop is completed */
	if (pn)
		goto backtrace;
flush_complete:
S
Stephen Hemminger 已提交
1732
	pr_debug("trie_flush found=%d\n", found);
1733 1734 1735
	return found;
}

1736
static void __trie_free_rcu(struct rcu_head *head)
1737
{
1738
	struct fib_table *tb = container_of(head, struct fib_table, rcu);
1739 1740 1741 1742 1743
#ifdef CONFIG_IP_FIB_TRIE_STATS
	struct trie *t = (struct trie *)tb->tb_data;

	free_percpu(t->stats);
#endif /* CONFIG_IP_FIB_TRIE_STATS */
1744 1745 1746
	kfree(tb);
}

1747 1748 1749 1750 1751
void fib_free_table(struct fib_table *tb)
{
	call_rcu(&tb->rcu, __trie_free_rcu);
}

1752
static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
A
Alexander Duyck 已提交
1753
			     struct sk_buff *skb, struct netlink_callback *cb)
1754
{
A
Alexander Duyck 已提交
1755
	__be32 xkey = htonl(l->key);
1756
	struct fib_alias *fa;
A
Alexander Duyck 已提交
1757
	int i, s_i;
1758

A
Alexander Duyck 已提交
1759
	s_i = cb->args[4];
1760 1761
	i = 0;

R
Robert Olsson 已提交
1762
	/* rcu_read_lock is hold by caller */
A
Alexander Duyck 已提交
1763
	hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
1764 1765 1766 1767 1768
		if (i < s_i) {
			i++;
			continue;
		}

1769
		if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
1770 1771 1772 1773
				  cb->nlh->nlmsg_seq,
				  RTM_NEWROUTE,
				  tb->tb_id,
				  fa->fa_type,
1774
				  xkey,
1775
				  KEYLENGTH - fa->fa_slen,
1776
				  fa->fa_tos,
1777
				  fa->fa_info, NLM_F_MULTI) < 0) {
1778
			cb->args[4] = i;
1779 1780
			return -1;
		}
1781
		i++;
1782
	}
1783

1784
	cb->args[4] = i;
1785 1786 1787
	return skb->len;
}

/* rcu_read_lock needs to be held by the caller on the read side */
1789 1790
int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
		   struct netlink_callback *cb)
1791
{
1792
	struct trie *t = (struct trie *)tb->tb_data;
1793
	struct key_vector *l, *tp;
1794 1795 1796
	/* Dump starting at last key.
	 * Note: 0.0.0.0/0 (ie default) is first key.
	 */
1797 1798
	int count = cb->args[2];
	t_key key = cb->args[3];
1799

1800
	tp = rcu_dereference_rtnl(t->tnode[0]);
1801 1802

	while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
1803
		if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
1804 1805
			cb->args[3] = key;
			cb->args[2] = count;
1806
			return -1;
1807
		}
1808

1809
		++count;
1810 1811
		key = l->key + 1;

1812 1813
		memset(&cb->args[4], 0,
		       sizeof(cb->args) - 4*sizeof(cb->args[0]));
1814 1815 1816 1817

		/* stop loop if key wrapped back to 0 */
		if (key < l->key)
			break;
1818
	}
1819 1820 1821 1822

	cb->args[3] = key;
	cb->args[2] = count;

1823 1824 1825
	return skb->len;
}

1826
void __init fib_trie_init(void)
1827
{
1828 1829
	fn_alias_kmem = kmem_cache_create("ip_fib_alias",
					  sizeof(struct fib_alias),
					  0, SLAB_PANIC, NULL);

	trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
					   LEAF_SIZE,
					   0, SLAB_PANIC, NULL);
}


struct fib_table *fib_trie_table(u32 id)
{
	struct fib_table *tb;
	struct trie *t;

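	/* the trie is embedded directly behind the fib_table in one allocation */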
	tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
		     GFP_KERNEL);
	if (tb == NULL)
		return NULL;

	tb->tb_id = id;
	tb->tb_default = -1;
	tb->tb_num_default = 0;

	t = (struct trie *) tb->tb_data;
	RCU_INIT_POINTER(t->tnode[0], NULL);
#ifdef CONFIG_IP_FIB_TRIE_STATS
	t->stats = alloc_percpu(struct trie_use_stats);
	if (!t->stats) {
		kfree(tb);
		tb = NULL;
	}
#endif

	return tb;
}

#ifdef CONFIG_PROC_FS
/* Depth first Trie walk iterator */
struct fib_trie_iter {
	struct seq_net_private p;
	struct fib_table *tb;
	struct key_vector *tnode;
	unsigned int index;
	unsigned int depth;
};

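/* Advance the iterator one node in depth-first order: scan the child
 * slots of the current tnode, descend into the first child found, and
 * when a tnode is exhausted pop back to its parent and continue with
 * the next sibling index.
 */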
static struct key_vector *fib_trie_get_next(struct fib_trie_iter *iter)
{
	unsigned long cindex = iter->index;
	struct key_vector *tn = iter->tnode;
	struct key_vector *p;

	/* A single entry routing table */
	if (!tn)
		return NULL;

	pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
		 iter->tnode, iter->index, iter->depth);
rescan:
	while (cindex < child_length(tn)) {
		struct key_vector *n = get_child_rcu(tn, cindex);

		if (n) {
			if (IS_LEAF(n)) {
				iter->tnode = tn;
				iter->index = cindex + 1;
			} else {
				/* push down one level */
				iter->tnode = n;
				iter->index = 0;
				++iter->depth;
			}
			return n;
		}

		++cindex;
	}

	/* Current node exhausted, pop back up */
	p = node_parent_rcu(tn);
	if (p) {
		cindex = get_index(tn->key, p) + 1;
1911 1912 1913
		tn = p;
		--iter->depth;
		goto rescan;
	}

	/* got root? */
	return NULL;
}

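/* Return the root of the trie and prime the iterator for a walk. */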
static struct key_vector *fib_trie_get_first(struct fib_trie_iter *iter,
					     struct trie *t)
{
	struct key_vector *n;

	if (!t)
		return NULL;

	n = rcu_dereference(t->tnode[0]);
	if (!n)
		return NULL;

	if (IS_TNODE(n)) {
		iter->tnode = n;
		iter->index = 0;
		iter->depth = 1;
	} else {
		iter->tnode = NULL;
		iter->index = 0;
		iter->depth = 0;
	}

	return n;
}

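/* Walk the whole trie under RCU and accumulate the per-node statistics
 * reported through /proc/net/fib_triestat.
 */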
static void trie_collect_stats(struct trie *t, struct trie_stat *s)
{
	struct key_vector *n;
	struct fib_trie_iter iter;

	memset(s, 0, sizeof(*s));

	rcu_read_lock();
	for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
		if (IS_LEAF(n)) {
			struct fib_alias *fa;

			s->leaves++;
			s->totdepth += iter.depth;
			if (iter.depth > s->maxdepth)
				s->maxdepth = iter.depth;

			hlist_for_each_entry_rcu(fa, &n->leaf, fa_list)
				++s->prefixes;
		} else {
			s->tnodes++;
			if (n->bits < MAX_STAT_DEPTH)
				s->nodesizes[n->bits]++;
			s->nullpointers += n->empty_children;
		}
	}
	rcu_read_unlock();
}

/*
 *	This outputs /proc/net/fib_triestats
 */
static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
{
	unsigned int i, max, pointers, bytes, avdepth;

	if (stat->leaves)
		avdepth = stat->totdepth*100 / stat->leaves;
	else
		avdepth = 0;

	seq_printf(seq, "\tAver depth:     %u.%02d\n",
		   avdepth / 100, avdepth % 100);
	seq_printf(seq, "\tMax depth:      %u\n", stat->maxdepth);

	seq_printf(seq, "\tLeaves:         %u\n", stat->leaves);
	bytes = LEAF_SIZE * stat->leaves;

	seq_printf(seq, "\tPrefixes:       %u\n", stat->prefixes);
	bytes += sizeof(struct fib_alias) * stat->prefixes;

	seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
	bytes += TNODE_SIZE(0) * stat->tnodes;

	max = MAX_STAT_DEPTH;
	while (max > 0 && stat->nodesizes[max-1] == 0)
		max--;

	pointers = 0;
	for (i = 1; i < max; i++)
		if (stat->nodesizes[i] != 0) {
			seq_printf(seq, "  %u: %u",  i, stat->nodesizes[i]);
			pointers += (1<<i) * stat->nodesizes[i];
		}
	seq_putc(seq, '\n');
	seq_printf(seq, "\tPointers: %u\n", pointers);

	bytes += sizeof(struct key_vector *) * pointers;
	seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
	seq_printf(seq, "Total size: %u  kB\n", (bytes + 1023) / 1024);
}

#ifdef CONFIG_IP_FIB_TRIE_STATS
static void trie_show_usage(struct seq_file *seq,
			    const struct trie_use_stats __percpu *stats)
{
	struct trie_use_stats s = { 0 };
	int cpu;

	/* loop through all of the CPUs and gather up the stats */
	for_each_possible_cpu(cpu) {
		const struct trie_use_stats *pcpu = per_cpu_ptr(stats, cpu);

		s.gets += pcpu->gets;
		s.backtrack += pcpu->backtrack;
		s.semantic_match_passed += pcpu->semantic_match_passed;
		s.semantic_match_miss += pcpu->semantic_match_miss;
		s.null_node_hit += pcpu->null_node_hit;
		s.resize_node_skipped += pcpu->resize_node_skipped;
	}

	seq_printf(seq, "\nCounters:\n---------\n");
	seq_printf(seq, "gets = %u\n", s.gets);
	seq_printf(seq, "backtracks = %u\n", s.backtrack);
	seq_printf(seq, "semantic match passed = %u\n",
		   s.semantic_match_passed);
	seq_printf(seq, "semantic match miss = %u\n", s.semantic_match_miss);
	seq_printf(seq, "null node hit= %u\n", s.null_node_hit);
	seq_printf(seq, "skipped node resize = %u\n\n", s.resize_node_skipped);
}
#endif /*  CONFIG_IP_FIB_TRIE_STATS */

static void fib_table_print(struct seq_file *seq, struct fib_table *tb)
{
	if (tb->tb_id == RT_TABLE_LOCAL)
		seq_puts(seq, "Local:\n");
	else if (tb->tb_id == RT_TABLE_MAIN)
		seq_puts(seq, "Main:\n");
	else
		seq_printf(seq, "Id %d:\n", tb->tb_id);
}


static int fib_triestat_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = (struct net *)seq->private;
	unsigned int h;

	seq_printf(seq,
		   "Basic info: size of leaf:"
		   " %Zd bytes, size of tnode: %Zd bytes.\n",
		   LEAF_SIZE, TNODE_SIZE(0));

	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
		struct fib_table *tb;

		hlist_for_each_entry_rcu(tb, head, tb_hlist) {
			struct trie *t = (struct trie *) tb->tb_data;
			struct trie_stat stat;

			if (!t)
				continue;

			fib_table_print(seq, tb);

			trie_collect_stats(t, &stat);
			trie_show_stats(seq, &stat);
#ifdef CONFIG_IP_FIB_TRIE_STATS
			trie_show_usage(seq, t->stats);
#endif
		}
	}

	return 0;
}

static int fib_triestat_seq_open(struct inode *inode, struct file *file)
{
	return single_open_net(inode, file, fib_triestat_seq_show);
}

static const struct file_operations fib_triestat_fops = {
	.owner	= THIS_MODULE,
	.open	= fib_triestat_seq_open,
	.read	= seq_read,
	.llseek	= seq_lseek,
	.release = single_release_net,
};

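/* Locate the pos'th trie node across all routing tables in this
 * namespace for the fib_trie seq_file walker.
 */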
static struct key_vector *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
{
	struct fib_trie_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	loff_t idx = 0;
	unsigned int h;

	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
		struct fib_table *tb;

		hlist_for_each_entry_rcu(tb, head, tb_hlist) {
			struct key_vector *n;

			for (n = fib_trie_get_first(iter,
						    (struct trie *) tb->tb_data);
			     n; n = fib_trie_get_next(iter))
				if (pos == idx++) {
					iter->tb = tb;
					return n;
				}
		}
	}

	return NULL;
}

static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return fib_trie_get_idx(seq, *pos);
}

static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct fib_trie_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct fib_table *tb = iter->tb;
	struct hlist_node *tb_node;
	unsigned int h;
	struct key_vector *n;

	++*pos;
	/* next node in same table */
	n = fib_trie_get_next(iter);
	if (n)
		return n;

	/* walk rest of this hash chain */
	h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
	while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) {
		tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
		n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
		if (n)
			goto found;
	}

	/* new hash chain */
	while (++h < FIB_TABLE_HASHSZ) {
		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
		hlist_for_each_entry_rcu(tb, head, tb_hlist) {
			n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
			if (n)
				goto found;
		}
	}
	return NULL;

found:
	iter->tb = tb;
	return n;
}

static void fib_trie_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static void seq_indent(struct seq_file *seq, int n)
{
	while (n-- > 0)
		seq_puts(seq, "   ");
}

static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
{
	switch (s) {
	case RT_SCOPE_UNIVERSE: return "universe";
	case RT_SCOPE_SITE:	return "site";
	case RT_SCOPE_LINK:	return "link";
	case RT_SCOPE_HOST:	return "host";
	case RT_SCOPE_NOWHERE:	return "nowhere";
	default:
		snprintf(buf, len, "scope=%d", s);
		return buf;
	}
}

static const char *const rtn_type_names[__RTN_MAX] = {
	[RTN_UNSPEC] = "UNSPEC",
	[RTN_UNICAST] = "UNICAST",
	[RTN_LOCAL] = "LOCAL",
	[RTN_BROADCAST] = "BROADCAST",
	[RTN_ANYCAST] = "ANYCAST",
	[RTN_MULTICAST] = "MULTICAST",
	[RTN_BLACKHOLE] = "BLACKHOLE",
	[RTN_UNREACHABLE] = "UNREACHABLE",
	[RTN_PROHIBIT] = "PROHIBIT",
	[RTN_THROW] = "THROW",
	[RTN_NAT] = "NAT",
	[RTN_XRESOLVE] = "XRESOLVE",
};

static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
{
	if (t < __RTN_MAX && rtn_type_names[t])
		return rtn_type_names[t];
	snprintf(buf, len, "type %u", t);
	return buf;
}

/* Pretty print the trie */
static int fib_trie_seq_show(struct seq_file *seq, void *v)
{
	const struct fib_trie_iter *iter = seq->private;
	struct key_vector *n = v;

	if (!node_parent_rcu(n))
		fib_table_print(seq, iter->tb);

	if (IS_TNODE(n)) {
		__be32 prf = htonl(n->key);

		seq_indent(seq, iter->depth-1);
		seq_printf(seq, "  +-- %pI4/%zu %u %u %u\n",
			   &prf, KEYLENGTH - n->pos - n->bits, n->bits,
			   n->full_children, n->empty_children);
	} else {
		__be32 val = htonl(n->key);
		struct fib_alias *fa;

		seq_indent(seq, iter->depth);
		seq_printf(seq, "  |-- %pI4\n", &val);

		hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) {
			char buf1[32], buf2[32];

			seq_indent(seq, iter->depth + 1);
			seq_printf(seq, "  /%zu %s %s",
				   KEYLENGTH - fa->fa_slen,
				   rtn_scope(buf1, sizeof(buf1),
					     fa->fa_info->fib_scope),
				   rtn_type(buf2, sizeof(buf2),
					    fa->fa_type));
			if (fa->fa_tos)
				seq_printf(seq, " tos=%d", fa->fa_tos);
			seq_putc(seq, '\n');
		}
	}

	return 0;
}

static const struct seq_operations fib_trie_seq_ops = {
	.start  = fib_trie_seq_start,
	.next   = fib_trie_seq_next,
	.stop   = fib_trie_seq_stop,
	.show   = fib_trie_seq_show,
};

static int fib_trie_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &fib_trie_seq_ops,
			    sizeof(struct fib_trie_iter));
}

static const struct file_operations fib_trie_fops = {
	.owner  = THIS_MODULE,
	.open   = fib_trie_seq_open,
	.read   = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};

struct fib_route_iter {
	struct seq_net_private p;
	struct fib_table *main_tb;
	struct key_vector *tnode;
	loff_t	pos;
	t_key	key;
};

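/* Position the /proc/net/route iterator on the pos'th leaf, reusing the
 * cached (tnode, key) position when the requested offset is at or beyond
 * the previously reached one.
 */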
static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
					    loff_t pos)
{
	struct fib_table *tb = iter->main_tb;
	struct key_vector *l, **tp = &iter->tnode;
	struct trie *t;
	t_key key;

	/* use cached location of next-to-find key */
	if (iter->pos > 0 && pos >= iter->pos) {
		pos -= iter->pos;
		key = iter->key;
	} else {
		t = (struct trie *)tb->tb_data;
		iter->tnode = rcu_dereference_rtnl(t->tnode[0]);
		iter->pos = 0;
		key = 0;
	}

	while ((l = leaf_walk_rcu(tp, key)) != NULL) {
		key = l->key + 1;
		iter->pos++;

		if (pos-- <= 0)
			break;

		l = NULL;

		/* handle unlikely case of a key wrap */
		if (!key)
			break;
	}

	if (l)
		iter->key = key;	/* remember it */
	else
		iter->pos = 0;		/* forget it */

	return l;
}

static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct fib_route_iter *iter = seq->private;
	struct fib_table *tb;
	struct trie *t;

	rcu_read_lock();

	tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
	if (!tb)
		return NULL;

2353 2354 2355 2356 2357 2358
	iter->main_tb = tb;

	if (*pos != 0)
		return fib_route_get_idx(iter, *pos);

	t = (struct trie *)tb->tb_data;
	iter->tnode = rcu_dereference_rtnl(t->tnode[0]);
	iter->pos = 0;
	iter->key = 0;

	return SEQ_START_TOKEN;
}

static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct fib_route_iter *iter = seq->private;
	struct key_vector *l = NULL;
	t_key key = iter->key;

	++*pos;

	/* only allow key of 0 for start of sequence */
	if ((v == SEQ_START_TOKEN) || key)
		l = leaf_walk_rcu(&iter->tnode, key);

	if (l) {
		iter->key = l->key + 1;
		iter->pos++;
	} else {
		iter->pos = 0;
	}

	return l;
}

static void fib_route_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

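/* Translate a route type, netmask and nexthop info into the legacy RTF_*
 * flags shown in /proc/net/route.
 */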
static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
{
	unsigned int flags = 0;

	if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT)
		flags = RTF_REJECT;
	if (fi && fi->fib_nh->nh_gw)
		flags |= RTF_GATEWAY;
	if (mask == htonl(0xFFFFFFFF))
		flags |= RTF_HOST;
	flags |= RTF_UP;
	return flags;
}

/*
 *	This outputs /proc/net/route.
 *	The format of the file is not supposed to be changed
 *	and needs to be the same as the fib_hash output to avoid breaking
 *	legacy utilities
 */
static int fib_route_seq_show(struct seq_file *seq, void *v)
{
	struct fib_alias *fa;
	struct key_vector *l = v;
	__be32 prefix;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
			   "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
			   "\tWindow\tIRTT");
		return 0;
	}

	prefix = htonl(l->key);

	hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
		const struct fib_info *fi = fa->fa_info;
		__be32 mask = inet_make_mask(KEYLENGTH - fa->fa_slen);
		unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);

		if ((fa->fa_type == RTN_BROADCAST) ||
		    (fa->fa_type == RTN_MULTICAST))
			continue;

		seq_setwidth(seq, 127);

		if (fi)
			seq_printf(seq,
				   "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
				   "%d\t%08X\t%d\t%u\t%u",
				   fi->fib_dev ? fi->fib_dev->name : "*",
				   prefix,
				   fi->fib_nh->nh_gw, flags, 0, 0,
				   fi->fib_priority,
				   mask,
				   (fi->fib_advmss ?
				    fi->fib_advmss + 40 : 0),
				   fi->fib_window,
				   fi->fib_rtt >> 3);
		else
			seq_printf(seq,
				   "*\t%08X\t%08X\t%04X\t%d\t%u\t"
				   "%d\t%08X\t%d\t%u\t%u",
				   prefix, 0, flags, 0, 0, 0,
				   mask, 0, 0, 0);

		seq_pad(seq, '\n');
	}

	return 0;
}

static const struct seq_operations fib_route_seq_ops = {
	.start  = fib_route_seq_start,
	.next   = fib_route_seq_next,
	.stop   = fib_route_seq_stop,
	.show   = fib_route_seq_show,
};

static int fib_route_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &fib_route_seq_ops,
			    sizeof(struct fib_route_iter));
}

static const struct file_operations fib_route_fops = {
	.owner  = THIS_MODULE,
	.open   = fib_route_seq_open,
	.read   = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};

int __net_init fib_proc_init(struct net *net)
{
	if (!proc_create("fib_trie", S_IRUGO, net->proc_net, &fib_trie_fops))
		goto out1;

	if (!proc_create("fib_triestat", S_IRUGO, net->proc_net,
			 &fib_triestat_fops))
		goto out2;

	if (!proc_create("route", S_IRUGO, net->proc_net, &fib_route_fops))
		goto out3;

	return 0;

out3:
	remove_proc_entry("fib_triestat", net->proc_net);
out2:
	remove_proc_entry("fib_trie", net->proc_net);
out1:
	return -ENOMEM;
}

void __net_exit fib_proc_exit(struct net *net)
{
	remove_proc_entry("fib_trie", net->proc_net);
	remove_proc_entry("fib_triestat", net->proc_net);
	remove_proc_entry("route", net->proc_net);
}

#endif /* CONFIG_PROC_FS */