/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <net/ip.h>
#include <net/inetpeer.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains long-living
 *  information about the peer which doesn't depend on routes.
 *  At the moment this information consists only of the ID field for the next
 *  outgoing IP packet.  This field is incremented with each packet, as encoded
 *  in the inet_getid() function (include/net/inetpeer.h).
 *  At the time of writing, the identifier of an IP packet is generated
 *  unpredictably by this code only for packets subjected
 *  (actually or potentially) to defragmentation.  I.e. DF packets smaller than
 *  the PMTU use a constant ID and do not use this code (see
 *  ip_select_ident() in include/net/ip.h).
 *
 *  Route cache entries hold references to our nodes.
 *  New cache entries get references via lookup by destination IP address in
 *  the AVL tree.  The reference is grabbed only when it's needed, i.e. only
 *  when we try to output an IP packet which needs an unpredictable ID (see
 *  __ip_select_ident() in net/ipv4/route.c).
 *  Nodes are removed only when their reference counter goes to 0.
 *  Once that has happened, the node may be removed after a sufficient amount
 *  of time has passed since its last use.  The least-recently-used entry can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an AVL tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 *  number of long-living nodes in a single hash slot would significantly delay
 *  lookups performed with BHs disabled.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND reference count being 0.
 *  3.  Nodes appear on and disappear from the unused node list only under
 *      "unused_peers.lock".
 *  4.  The per-base total counter is modified under the pool lock.
 *  5.  struct inet_peer fields modification:
 *		avl_left, avl_right, avl_height: pool lock
 *		unused: unused node list lock
 *		refcnt: atomically against modifications on other CPU;
 *		   usually under some other lock to prevent node disappearing
 *		dtime: unused node list lock
 *		daddr: unchangeable
 *		ip_id_count: atomic value (no lock needed)
 */

static struct kmem_cache *peer_cachep __read_mostly;

#define node_height(x) x->avl_height

#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
static const struct inet_peer peer_fake_node = {
	.avl_left	= peer_avl_empty_rcu,
	.avl_right	= peer_avl_empty_rcu,
	.avl_height	= 0
};

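/* Per-family peer storage: the AVL tree root, the lock protecting it,
 * and the number of entries in the tree.
 */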
static struct inet_peer_base {
	struct inet_peer __rcu *root;
	spinlock_t	lock;
	int		total;
} v4_peers = {
	.root		= peer_avl_empty_rcu,
	.lock		= __SPIN_LOCK_UNLOCKED(v4_peers.lock),
	.total		= 0,
};
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */

/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
					 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
int inet_peer_gc_mintime __read_mostly = 10 * HZ;
int inet_peer_gc_maxtime __read_mostly = 120 * HZ;

static struct {
	struct list_head	list;
	spinlock_t		lock;
} unused_peers = {
	.list			= LIST_HEAD_INIT(unused_peers.list),
	.lock			= __SPIN_LOCK_UNLOCKED(unused_peers.lock),
};

static void peer_check_expire(unsigned long dummy);
static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);


/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
			NULL);

	/* All the timers, started at system startup, tend
	   to synchronize. Perturb it a bit.
	 */
	peer_periodic_timer.expires = jiffies
		+ net_random() % inet_peer_gc_maxtime
		+ inet_peer_gc_maxtime;
	add_timer(&peer_periodic_timer);
}

/* Called with or without local BH being disabled. */
static void unlink_from_unused(struct inet_peer *p)
{
	if (!list_empty(&p->unused)) {
		spin_lock_bh(&unused_peers.lock);
		list_del_init(&p->unused);
		spin_unlock_bh(&unused_peers.lock);
	}
}

/*
 * Called with local BH disabled and the pool lock held.
 */
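/* The traversal path is recorded in _stack (via the caller-declared stackptr)
 * so that peer_avl_rebalance() can rebalance the tree after an insert or
 * delete.
 */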
#define lookup(_daddr, _stack, _base)				\
({								\
	struct inet_peer *u;					\
	struct inet_peer __rcu **v;				\
								\
	stackptr = _stack;					\
	*stackptr++ = &_base->root;				\
	for (u = rcu_dereference_protected(_base->root,		\
			lockdep_is_held(&_base->lock));		\
	     u != peer_avl_empty; ) {				\
		if (_daddr == u->daddr.a4)			\
			break;					\
		if ((__force __u32)_daddr < (__force __u32)u->daddr.a4)	\
			v = &u->avl_left;			\
		else						\
			v = &u->avl_right;			\
		*stackptr++ = v;				\
		u = rcu_dereference_protected(*v,		\
			lockdep_is_held(&_base->lock));		\
	}							\
	u;							\
})

/*
 * Called with rcu_read_lock_bh()
 * Because we hold no lock against a writer, it's quite possible we fall
 * into an endless loop.
 * But every pointer we follow is guaranteed to be valid thanks to RCU.
 * We exit from this function if the number of links exceeds PEER_MAXDEPTH.
 */
static struct inet_peer *lookup_rcu_bh(__be32 daddr, struct inet_peer_base *base)
{
	struct inet_peer *u = rcu_dereference_bh(base->root);
	int count = 0;

	while (u != peer_avl_empty) {
		if (daddr == u->daddr.a4) {
			/* Before taking a reference, check if this entry was
			 * deleted: unlink_from_pool() sets refcnt=-1 to make
			 * a distinction between an unused entry (refcnt=0)
			 * and a freed one.
			 */
			if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1)))
				u = NULL;
			return u;
		}
		if ((__force __u32)daddr < (__force __u32)u->daddr.a4)
			u = rcu_dereference_bh(u->avl_left);
		else
			u = rcu_dereference_bh(u->avl_right);
		if (unlikely(++count == PEER_MAXDEPTH))
			break;
	}
	return NULL;
}

/* Called with local BH disabled and the pool lock held. */
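/* Finds the rightmost node in start's left subtree, i.e. the in-order
 * predecessor that unlink_from_pool() uses as a replacement node; the
 * descent is appended to the caller's stackptr path.
 */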
#define lookup_rightempty(start, base)				\
({								\
	struct inet_peer *u;					\
	struct inet_peer __rcu **v;				\
	*stackptr++ = &start->avl_left;				\
	v = &start->avl_left;					\
	for (u = rcu_dereference_protected(*v,			\
			lockdep_is_held(&base->lock));		\
	     u->avl_right != peer_avl_empty_rcu; ) {		\
		v = &u->avl_right;				\
		*stackptr++ = v;				\
		u = rcu_dereference_protected(*v,		\
			lockdep_is_held(&base->lock));		\
	}							\
	u;							\
})

/* Called with local BH disabled and the pool lock held.
 * Variable names are the proof of operation correctness.
 * Look into mm/map_avl.c for a more detailed description of the ideas.
 */
static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
			       struct inet_peer __rcu ***stackend,
			       struct inet_peer_base *base)
{
	struct inet_peer __rcu **nodep;
	struct inet_peer *node, *l, *r;
	int lh, rh;

	while (stackend > stack) {
		nodep = *--stackend;
		node = rcu_dereference_protected(*nodep,
				lockdep_is_held(&base->lock));
		l = rcu_dereference_protected(node->avl_left,
				lockdep_is_held(&base->lock));
		r = rcu_dereference_protected(node->avl_right,
				lockdep_is_held(&base->lock));
		lh = node_height(l);
		rh = node_height(r);
		if (lh > rh + 1) { /* l: RH+2 */
			struct inet_peer *ll, *lr, *lrl, *lrr;
			int lrh;
			ll = rcu_dereference_protected(l->avl_left,
				lockdep_is_held(&base->lock));
			lr = rcu_dereference_protected(l->avl_right,
				lockdep_is_held(&base->lock));
			lrh = node_height(lr);
			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
				RCU_INIT_POINTER(node->avl_left, lr);	/* lr: RH or RH+1 */
				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH+1 */
				RCU_INIT_POINTER(l->avl_right, node);	/* node: RH+1 or RH+2 */
				l->avl_height = node->avl_height + 1;
				RCU_INIT_POINTER(*nodep, l);
			} else { /* ll: RH, lr: RH+1 */
				lrl = rcu_dereference_protected(lr->avl_left,
					lockdep_is_held(&base->lock));	/* lrl: RH or RH-1 */
				lrr = rcu_dereference_protected(lr->avl_right,
					lockdep_is_held(&base->lock));	/* lrr: RH or RH-1 */
				RCU_INIT_POINTER(node->avl_left, lrr);	/* lrr: RH or RH-1 */
				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
				node->avl_height = rh + 1; /* node: RH+1 */
				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH */
				RCU_INIT_POINTER(l->avl_right, lrl);	/* lrl: RH or RH-1 */
				l->avl_height = rh + 1;	/* l: RH+1 */
				RCU_INIT_POINTER(lr->avl_left, l);	/* l: RH+1 */
				RCU_INIT_POINTER(lr->avl_right, node);	/* node: RH+1 */
				lr->avl_height = rh + 2;
				RCU_INIT_POINTER(*nodep, lr);
			}
		} else if (rh > lh + 1) { /* r: LH+2 */
			struct inet_peer *rr, *rl, *rlr, *rll;
			int rlh;
			rr = rcu_dereference_protected(r->avl_right,
				lockdep_is_held(&base->lock));
			rl = rcu_dereference_protected(r->avl_left,
				lockdep_is_held(&base->lock));
			rlh = node_height(rl);
			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
				RCU_INIT_POINTER(node->avl_right, rl);	/* rl: LH or LH+1 */
				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH+1 */
				RCU_INIT_POINTER(r->avl_left, node);	/* node: LH+1 or LH+2 */
				r->avl_height = node->avl_height + 1;
				RCU_INIT_POINTER(*nodep, r);
			} else { /* rr: LH, rl: LH+1 */
				rlr = rcu_dereference_protected(rl->avl_right,
					lockdep_is_held(&base->lock));	/* rlr: LH or LH-1 */
				rll = rcu_dereference_protected(rl->avl_left,
					lockdep_is_held(&base->lock));	/* rll: LH or LH-1 */
				RCU_INIT_POINTER(node->avl_right, rll);	/* rll: LH or LH-1 */
				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
				node->avl_height = lh + 1; /* node: LH+1 */
				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH */
				RCU_INIT_POINTER(r->avl_left, rlr);	/* rlr: LH or LH-1 */
				r->avl_height = lh + 1;	/* r: LH+1 */
				RCU_INIT_POINTER(rl->avl_right, r);	/* r: LH+1 */
				RCU_INIT_POINTER(rl->avl_left, node);	/* node: LH+1 */
				rl->avl_height = lh + 2;
				RCU_INIT_POINTER(*nodep, rl);
			}
		} else {
			node->avl_height = (lh > rh ? lh : rh) + 1;
		}
	}
}

/* Called with local BH disabled and the pool lock held. */
#define link_to_pool(n, base)					\
do {								\
	n->avl_height = 1;					\
	n->avl_left = peer_avl_empty_rcu;			\
	n->avl_right = peer_avl_empty_rcu;			\
	/* lockless readers can catch us now */			\
	rcu_assign_pointer(**--stackptr, n);			\
	peer_avl_rebalance(stack, stackptr, base);		\
} while (0)

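/* RCU callback: actually free the node once a grace period has elapsed,
 * so no lockless reader can still hold a pointer to it.
 */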
static void inetpeer_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

/* May be called with local BH enabled. */
static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
{
	int do_free;

	do_free = 0;

	spin_lock_bh(&base->lock);
	/* Check the reference counter.  It was artificially incremented by 1
	 * in the cleanup_once() function to prevent the node from suddenly
	 * disappearing.  If we can atomically (because of lockless readers)
	 * take this last reference, it's safe to remove the node and free
	 * it later.
	 * We use refcnt=-1 to alert lockless readers this entry is deleted.
	 */
	if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
		struct inet_peer __rcu **stack[PEER_MAXDEPTH];
		struct inet_peer __rcu ***stackptr, ***delp;
		if (lookup(p->daddr.a4, stack, base) != p)
			BUG();
		delp = stackptr - 1; /* *delp[0] == p */
		if (p->avl_left == peer_avl_empty_rcu) {
			*delp[0] = p->avl_right;
			--stackptr;
		} else {
			/* look for a node to insert instead of p */
			struct inet_peer *t;
			t = lookup_rightempty(p, base);
			BUG_ON(rcu_dereference_protected(*stackptr[-1],
					lockdep_is_held(&base->lock)) != t);
			**--stackptr = t->avl_left;
			/* t is removed, t->daddr > x->daddr for any
			 * x in p->avl_left subtree.
			 * Put t in the old place of p. */
			RCU_INIT_POINTER(*delp[0], t);
			t->avl_left = p->avl_left;
			t->avl_right = p->avl_right;
			t->avl_height = p->avl_height;
			BUG_ON(delp[1] != &p->avl_left);
			delp[1] = &t->avl_left; /* was &p->avl_left */
		}
		peer_avl_rebalance(stack, stackptr, base);
		base->total--;
		do_free = 1;
	}
	spin_unlock_bh(&base->lock);

	if (do_free)
		call_rcu_bh(&p->rcu, inetpeer_free_rcu);
	else
		/* The node is used again.  Decrease the reference counter
		 * back.  The loop "cleanup -> unlink_from_unused
		 *   -> unlink_from_pool -> putpeer -> link_to_unused
		 *   -> cleanup (for the same node)"
		 * doesn't really exist because the entry will have a
		 * recent deletion time and will not be cleaned again soon.
		 */
		inet_putpeer(p);
}

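/* Map a peer back to the base it lives in; only the v4 base exists so far. */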
static struct inet_peer_base *peer_to_base(struct inet_peer *p)
{
	return &v4_peers;
}

/* May be called with local BH enabled. */
static int cleanup_once(unsigned long ttl)
{
	struct inet_peer *p = NULL;

	/* Remove the first entry from the list of unused nodes. */
	spin_lock_bh(&unused_peers.lock);
	if (!list_empty(&unused_peers.list)) {
		__u32 delta;

		p = list_first_entry(&unused_peers.list, struct inet_peer, unused);
		delta = (__u32)jiffies - p->dtime;

		if (delta < ttl) {
			/* Do not prune fresh entries. */
			spin_unlock_bh(&unused_peers.lock);
			return -1;
		}

		list_del_init(&p->unused);

		/* Grab an extra reference to prevent node disappearing
		 * before unlink_from_pool() call. */
		atomic_inc(&p->refcnt);
	}
	spin_unlock_bh(&unused_peers.lock);

	if (p == NULL)
		/* It means that the total number of USED entries has
		 * grown over inet_peer_threshold.  It shouldn't really
		 * happen because of entry limits in route cache. */
		return -1;

	unlink_from_pool(p, peer_to_base(p));
	return 0;
}

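/* Select the peer base for an address family; only AF_INET is handled here,
 * so the v4 base is always returned.
 */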
static struct inet_peer_base *family_to_base(int family)
{
	return &v4_peers;
}

/* Called with or without local BH being disabled. */
struct inet_peer *inet_getpeer(inet_peer_address_t *daddr, int create)
{
	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
	struct inet_peer_base *base = family_to_base(AF_INET);
	struct inet_peer *p;

	/* Look up the address quickly, lockless.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	rcu_read_lock_bh();
	p = lookup_rcu_bh(daddr->a4, base);
	rcu_read_unlock_bh();

	if (p) {
		/* The existing node has been found.
		 * Remove the entry from unused list if it was there.
		 */
		unlink_from_unused(p);
		return p;
	}

	/* Retry an exact lookup, this time taking the lock.
	 * At least, nodes should be hot in our cache.
	 */
	spin_lock_bh(&base->lock);
	p = lookup(daddr->a4, stack, base);
	if (p != peer_avl_empty) {
		atomic_inc(&p->refcnt);
		spin_unlock_bh(&base->lock);
		/* Remove the entry from unused list if it was there. */
		unlink_from_unused(p);
		return p;
	}
	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
	if (p) {
		p->daddr = *daddr;
		atomic_set(&p->refcnt, 1);
		atomic_set(&p->rid, 0);
		atomic_set(&p->ip_id_count, secure_ip_id(daddr->a4));
		p->tcp_ts_stamp = 0;
		INIT_LIST_HEAD(&p->unused);

		/* Link the node. */
		link_to_pool(p, base);
		base->total++;
	}
	spin_unlock_bh(&base->lock);

	if (base->total >= inet_peer_threshold)
		/* Remove one less-recently-used entry. */
		cleanup_once(0);

	return p;
}

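/* Total number of entries across all bases (currently just the v4 base). */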
static int compute_total(void)
{
	return v4_peers.total;
}

/* Called with local BH disabled. */
static void peer_check_expire(unsigned long dummy)
{
	unsigned long now = jiffies;
	int ttl, total;

	total = compute_total();
	if (total >= inet_peer_threshold)
		ttl = inet_peer_minttl;
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					total / inet_peer_threshold * HZ;
	while (!cleanup_once(ttl)) {
		if (jiffies != now)
			break;
	}

	/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
	 * interval depending on the total number of entries (more entries,
	 * less interval). */
	total = compute_total();
	if (total >= inet_peer_threshold)
		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
	else
		peer_periodic_timer.expires = jiffies
			+ inet_peer_gc_maxtime
			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
				total / inet_peer_threshold * HZ;
	add_timer(&peer_periodic_timer);
}

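/* Release a reference.  When the count drops to zero, the node is moved to
 * the tail of the unused list and its deletion time is recorded, so that
 * cleanup_once() can reclaim it later.
 */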
void inet_putpeer(struct inet_peer *p)
{
	local_bh_disable();

	if (atomic_dec_and_lock(&p->refcnt, &unused_peers.lock)) {
		list_add_tail(&p->unused, &unused_peers.list);
		p->dtime = (__u32)jiffies;
		spin_unlock(&unused_peers.lock);
	}

	local_bh_enable();
}