/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H

#include <linux/interrupt.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/tcp_states.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * One chain is dedicated to TIME_WAIT sockets.
 * I'll experiment with dynamic table growth later.
 */
struct inet_ehash_bucket {
	struct hlist_head chain;
	struct hlist_head twchain;
};

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit, if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (ie. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)	-DaveM
 */
struct inet_bind_bucket {
	unsigned short		port;
	signed short		fastreuse;
	struct hlist_node	node;
	struct hlist_head	owners;
};
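
/*
 * Illustrative sketch (not the actual get-port code in
 * net/ipv4/inet_connection_sock.c): with the fastreuse flag maintained
 * as described above, the bind-time fast path reduces to a single test,
 * assuming tb and sk were already looked up by the caller:
 *
 *	if (tb->fastreuse > 0 && sk->sk_reuse && sk->sk_state != TCP_LISTEN)
 *		goto success;	(no need to walk tb->owners at all)
 */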

#define inet_bind_bucket_for_each(tb, node, head) \
	hlist_for_each_entry(tb, node, head, node)

struct inet_bind_hashbucket {
	spinlock_t		lock;
	struct hlist_head	chain;
};

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */

struct inet_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
	 *
	 * TIME_WAIT sockets use a separate chain (twchain).
	 */
	struct inet_ehash_bucket	*ehash;
	rwlock_t			*ehash_locks;
	unsigned int			ehash_size;
	unsigned int			ehash_locks_mask;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct inet_bind_hashbucket	*bhash;

	unsigned int			bhash_size;
	/* Note : 4 bytes padding on 64 bit arches */

	/* All sockets in TCP_LISTEN state will be in here.  This is the only
	 * table where wildcard'd TCP sockets can exist.  Hash function here
	 * is just local port number.
	 */
	struct hlist_head		listening_hash[INET_LHTABLE_SIZE];

	/* All the above members are written once at bootup and
	 * never written again _or_ are predominantly read-access.
	 *
	 * Now align to a new cache line as all the following members
	 * are often dirty.
	 */
	rwlock_t			lhash_lock ____cacheline_aligned;
	atomic_t			lhash_users;
	wait_queue_head_t		lhash_wait;

	struct kmem_cache		*bind_bucket_cachep;
};

static inline struct inet_ehash_bucket *inet_ehash_bucket(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
}

static inline rwlock_t *inet_ehash_lockp(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}
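
/*
 * Typical use of the two helpers above (a sketch of what
 * __inet_lookup_established() does later in this file): the same hash
 * value selects both the bucket and the lock that covers it.
 *
 *	struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
 *	rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);
 *
 *	read_lock(lock);
 *	sk_for_each(sk, node, &head->chain)
 *		...
 *	read_unlock(lock);
 */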

static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int i, size = 256;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif
	if (nr_pcpus >= 4)
		size = 512;
	if (nr_pcpus >= 8)
		size = 1024;
	if (nr_pcpus >= 16)
		size = 2048;
	if (nr_pcpus >= 32)
		size = 4096;
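	/*
	 * rwlock_t may be an empty struct (sizeof == 0) on uniprocessor
	 * builds without lock debugging; skip the allocation entirely
	 * in that case.
	 */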
	if (sizeof(rwlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(rwlock_t) > PAGE_SIZE)
			hashinfo->ehash_locks = vmalloc(size * sizeof(rwlock_t));
		else
#endif
		hashinfo->ehash_locks =	kmalloc(size * sizeof(rwlock_t),
						GFP_KERNEL);
		if (!hashinfo->ehash_locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			rwlock_init(&hashinfo->ehash_locks[i]);
	}
	hashinfo->ehash_locks_mask = size - 1;
	return 0;
}

static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
	if (hashinfo->ehash_locks) {
#ifdef CONFIG_NUMA
		unsigned int size = (hashinfo->ehash_locks_mask + 1) *
							sizeof(rwlock_t);
		if (size > PAGE_SIZE)
			vfree(hashinfo->ehash_locks);
		else
#endif
		kfree(hashinfo->ehash_locks);
		hashinfo->ehash_locks = NULL;
	}
}
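
/*
 * Pairing sketch (hypothetical caller, e.g. a protocol's init path): the
 * lock array is set up before the ehash table is used and torn down on
 * error or unload.
 *
 *	if (inet_ehash_locks_alloc(&my_hashinfo))
 *		goto out_free_ehash;
 *	...
 *	inet_ehash_locks_free(&my_hashinfo);
 */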

extern struct inet_bind_bucket *
		    inet_bind_bucket_create(struct kmem_cache *cachep,
					    struct inet_bind_hashbucket *head,
					    const unsigned short snum);
extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
				     struct inet_bind_bucket *tb);

static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
{
	return lport & (bhash_size - 1);
}

extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
			   const unsigned short snum);

/* These can have wildcards, don't try too hard. */
static inline int inet_lhashfn(const unsigned short num)
{
	return num & (INET_LHTABLE_SIZE - 1);
}

static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
	return inet_lhashfn(inet_sk(sk)->num);
}

/* Caller must disable local BH processing. */
static inline void __inet_inherit_port(struct inet_hashinfo *table,
				       struct sock *sk, struct sock *child)
{
	const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	sk_add_bind_node(child, &tb->owners);
	inet_csk(child)->icsk_bind_hash = tb;
	spin_unlock(&head->lock);
}

static inline void inet_inherit_port(struct inet_hashinfo *table,
				     struct sock *sk, struct sock *child)
{
	local_bh_disable();
	__inet_inherit_port(table, sk, child);
	local_bh_enable();
}
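
/*
 * Typical caller (sketch): when a listening socket spawns a child for an
 * accepted connection, the child keeps the parent's local port.
 * tcp_v4_syn_recv_sock() runs in BH context and so uses the __ variant
 * directly:
 *
 *	__inet_inherit_port(&tcp_hashinfo, sk, newsk);
 */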

extern void inet_put_port(struct inet_hashinfo *table, struct sock *sk);

extern void inet_listen_wlock(struct inet_hashinfo *hashinfo);

/*
 * - We may sleep inside this lock.
 * - If sleeping is not required (or called from BH),
 *   use plain read_(un)lock(&inet_hashinfo.lhash_lock).
 */
static inline void inet_listen_lock(struct inet_hashinfo *hashinfo)
{
	/* read_lock synchronizes with candidate writers */
	read_lock(&hashinfo->lhash_lock);
	atomic_inc(&hashinfo->lhash_users);
	read_unlock(&hashinfo->lhash_lock);
}

static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo)
{
	if (atomic_dec_and_test(&hashinfo->lhash_users))
		wake_up(&hashinfo->lhash_wait);
}
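
/*
 * The writer side, inet_listen_wlock() (defined in
 * net/ipv4/inet_hashtables.c), takes the write lock and then waits on
 * lhash_wait until lhash_users drains to zero.  Roughly:
 *
 *	write_lock(&hashinfo->lhash_lock);
 *	while (atomic_read(&hashinfo->lhash_users)) {
 *		write_unlock_bh(&hashinfo->lhash_lock);
 *		schedule();	(sleeping on lhash_wait)
 *		write_lock_bh(&hashinfo->lhash_lock);
 *	}
 */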

static inline void __inet_hash(struct inet_hashinfo *hashinfo,
			       struct sock *sk, const int listen_possible)
{
	struct hlist_head *list;
	rwlock_t *lock;

	BUG_TRAP(sk_unhashed(sk));
	if (listen_possible && sk->sk_state == TCP_LISTEN) {
		list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
		lock = &hashinfo->lhash_lock;
		inet_listen_wlock(hashinfo);
	} else {
		struct inet_ehash_bucket *head;
		sk->sk_hash = inet_sk_ehashfn(sk);
		head = inet_ehash_bucket(hashinfo, sk->sk_hash);
		list = &head->chain;
		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
		write_lock(lock);
	}
	__sk_add_node(sk, list);
	sock_prot_inc_use(sk->sk_prot);
	write_unlock(lock);
	if (listen_possible && sk->sk_state == TCP_LISTEN)
		wake_up(&hashinfo->lhash_wait);
}

static inline void inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		__inet_hash(hashinfo, sk, 1);
		local_bh_enable();
	}
}

static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk)
{
	rwlock_t *lock;

	if (sk_unhashed(sk))
		goto out;

	if (sk->sk_state == TCP_LISTEN) {
		local_bh_disable();
		inet_listen_wlock(hashinfo);
		lock = &hashinfo->lhash_lock;
	} else {
		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
		write_lock_bh(lock);
	}

	if (__sk_del_node_init(sk))
		sock_prot_dec_use(sk->sk_prot);
	write_unlock_bh(lock);
out:
	if (sk->sk_state == TCP_LISTEN)
		wake_up(&hashinfo->lhash_wait);
}

extern struct sock *__inet_lookup_listener(struct inet_hashinfo *hashinfo,
					   const __be32 daddr,
					   const unsigned short hnum,
					   const int dif);

static inline struct sock *inet_lookup_listener(struct inet_hashinfo *hashinfo,
						__be32 daddr, __be16 dport, int dif)
{
	return __inet_lookup_listener(hashinfo, daddr, ntohs(dport), dif);
}

/* Socket demux engine toys. */
/* What happens here is ugly; there's a pair of adjacent fields in
   struct inet_sock; __be16 dport followed by __u16 num.  We want to
   search by pair, so we combine the keys into a single 32bit value
   and compare with 32bit value read from &...->dport.  Let's at least
   make sure that it's not mixed with anything else...
   On 64bit targets we combine comparisons with pair of adjacent __be32
   fields in the same way.
*/
typedef __u32 __bitwise __portpair;
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif
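
/*
 * Worked example: struct inet_sock contains the adjacent fields
 *
 *	__be16	dport;	(remote port, network byte order)
 *	__u16	num;	(local port, host byte order)
 *
 * On a little-endian CPU a single 32-bit load from &dport yields dport
 * in the low 16 bits and num in the high 16 bits, which is exactly what
 * the __LITTLE_ENDIAN INET_COMBINED_PORTS(__sport, __dport) builds from
 * (remote port, local port).  One 32-bit compare thus checks both ports
 * at once.
 */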

#if (BITS_PER_LONG == 64)
typedef __u64 __bitwise __addrpair;
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__saddr)) << 32) | \
				   ((__force __u64)(__be32)(__daddr)));
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__daddr)) << 32) | \
				   ((__force __u64)(__be32)(__saddr)));
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
	(((__sk)->sk_hash == (__hash))				&&	\
	 ((*((__addrpair *)&(inet_sk(__sk)->daddr))) == (__cookie))	&&	\
	 ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports))	&&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
	(((__sk)->sk_hash == (__hash))				&&	\
	 ((*((__addrpair *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) &&	\
	 ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) &&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)	\
	(((__sk)->sk_hash == (__hash))				&&	\
	 (inet_sk(__sk)->daddr		== (__saddr))		&&	\
	 (inet_sk(__sk)->rcv_saddr	== (__daddr))		&&	\
	 ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports))	&&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)	\
	(((__sk)->sk_hash == (__hash))				&&	\
	 (inet_twsk(__sk)->tw_daddr	== (__saddr))		&&	\
	 (inet_twsk(__sk)->tw_rcv_saddr	== (__daddr))		&&	\
	 ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) &&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#endif /* 64-bit arch */

/*
 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 *
 * Local BH must be disabled here.
 */
static inline struct sock *
	__inet_lookup_established(struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
	rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);

	prefetch(head->chain.first);
	read_lock(lock);
	sk_for_each(sk, node, &head->chain) {
		if (INET_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
			goto hit; /* You sunk my battleship! */
	}

	/* Must check for a TIME_WAIT'er before going to listener hash. */
	sk_for_each(sk, node, &head->twchain) {
		if (INET_TW_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
			goto hit;
	}
	sk = NULL;
out:
	read_unlock(lock);
	return sk;
hit:
	sock_hold(sk);
	goto out;
}

static inline struct sock *
	inet_lookup_established(struct inet_hashinfo *hashinfo,
				const __be32 saddr, const __be16 sport,
				const __be32 daddr, const __be16 dport,
				const int dif)
{
	return __inet_lookup_established(hashinfo, saddr, sport, daddr,
					 ntohs(dport), dif);
}

static inline struct sock *__inet_lookup(struct inet_hashinfo *hashinfo,
					 const __be32 saddr, const __be16 sport,
					 const __be32 daddr, const __be16 dport,
					 const int dif)
{
	u16 hnum = ntohs(dport);
	struct sock *sk = __inet_lookup_established(hashinfo, saddr, sport, daddr,
						    hnum, dif);
	return sk ? : __inet_lookup_listener(hashinfo, daddr, hnum, dif);
}

static inline struct sock *inet_lookup(struct inet_hashinfo *hashinfo,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const __be16 dport,
				       const int dif)
{
	struct sock *sk;

	local_bh_disable();
	sk = __inet_lookup(hashinfo, saddr, sport, daddr, dport, dif);
	local_bh_enable();

	return sk;
}
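
/*
 * Demux usage sketch: a protocol's receive handler resolves an incoming
 * segment to a socket in one call.  tcp_v4_rcv() does, roughly:
 *
 *	sk = __inet_lookup(&tcp_hashinfo, iph->saddr, th->source,
 *			   iph->daddr, th->dest, inet_iif(skb));
 *
 * BH is already disabled in softirq context, hence the __ variant.
 */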

extern int inet_hash_connect(struct inet_timewait_death_row *death_row,
			     struct sock *sk);
#endif /* _INET_HASHTABLES_H */