/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H

#include <linux/interrupt.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/tcp_states.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * One chain is dedicated to TIME_WAIT sockets.
 * I'll experiment with dynamic table growth later.
 */
struct inet_ehash_bucket {
	rwlock_t	  lock;
	struct hlist_head chain;
	struct hlist_head twchain;
};

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit: if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all;
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (ie. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)	-DaveM
 */
struct inet_bind_bucket {
	unsigned short		port;
	signed short		fastreuse;
	struct hlist_node	node;
	struct hlist_head	owners;
};

#define inet_bind_bucket_for_each(tb, node, head) \
	hlist_for_each_entry(tb, node, head, node)
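
/*
 * Illustrative sketch only (the helper name is hypothetical; the in-tree
 * logic lives in the port allocation path): how the fastreuse hint described
 * above could be maintained as owners are added to a bind bucket.  While
 * every owner passes the reuse test, a later bind request with sk->sk_reuse
 * set can be granted without walking tb->owners at all.
 */
static inline void inet_bind_bucket_note_owner(struct inet_bind_bucket *tb,
					       const struct sock *sk)
{
	if (hlist_empty(&tb->owners))
		tb->fastreuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
	else if (tb->fastreuse &&
		 (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
		tb->fastreuse = 0;
}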

struct inet_bind_hashbucket {
	spinlock_t		lock;
	struct hlist_head	chain;
};

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */

struct inet_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
	 *
	 * TIME_WAIT sockets use a separate chain (twchain).
	 */
	struct inet_ehash_bucket	*ehash;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct inet_bind_hashbucket	*bhash;

	unsigned int			bhash_size;
	unsigned int			ehash_size;

	/* All sockets in TCP_LISTEN state will be in here.  This is the only
	 * table where wildcard'd TCP sockets can exist.  Hash function here
	 * is just local port number.
	 */
	struct hlist_head		listening_hash[INET_LHTABLE_SIZE];

	/* All the above members are written once at bootup and
	 * never written again _or_ are predominantly read-access.
	 *
	 * Now align to a new cache line as all the following members
	 * are often dirty.
	 */
	rwlock_t			lhash_lock ____cacheline_aligned;
	atomic_t			lhash_users;
	wait_queue_head_t		lhash_wait;
	struct kmem_cache			*bind_bucket_cachep;
};

static inline struct inet_ehash_bucket *inet_ehash_bucket(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
}
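
/*
 * Initialization sketch (illustrative only; the helper name is hypothetical
 * and the real boot-time setup lives elsewhere): every bucket's lock and
 * both of its chains must be initialized, and ehash_size must be a power of
 * two so the mask in inet_ehash_bucket() above stays correct.
 */
static inline void inet_ehash_example_init(struct inet_hashinfo *hashinfo)
{
	unsigned int i;

	for (i = 0; i < hashinfo->ehash_size; i++) {
		rwlock_init(&hashinfo->ehash[i].lock);
		INIT_HLIST_HEAD(&hashinfo->ehash[i].chain);
		INIT_HLIST_HEAD(&hashinfo->ehash[i].twchain);
	}
}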

extern struct inet_bind_bucket *
		    inet_bind_bucket_create(struct kmem_cache *cachep,
					    struct inet_bind_hashbucket *head,
					    const unsigned short snum);
extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
				     struct inet_bind_bucket *tb);

static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
{
	return lport & (bhash_size - 1);
}

extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
			   const unsigned short snum);

/* These can have wildcards, don't try too hard. */
static inline int inet_lhashfn(const unsigned short num)
{
	return num & (INET_LHTABLE_SIZE - 1);
}

static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
	return inet_lhashfn(inet_sk(sk)->num);
}

/* Caller must disable local BH processing. */
static inline void __inet_inherit_port(struct inet_hashinfo *table,
				       struct sock *sk, struct sock *child)
{
	const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	sk_add_bind_node(child, &tb->owners);
	inet_csk(child)->icsk_bind_hash = tb;
	spin_unlock(&head->lock);
}

static inline void inet_inherit_port(struct inet_hashinfo *table,
				     struct sock *sk, struct sock *child)
{
	local_bh_disable();
	__inet_inherit_port(table, sk, child);
	local_bh_enable();
}

extern void inet_put_port(struct inet_hashinfo *table, struct sock *sk);

extern void inet_listen_wlock(struct inet_hashinfo *hashinfo);

/*
 * - We may sleep inside this lock.
 * - If sleeping is not required (or called from BH),
 *   use plain read_(un)lock(&inet_hashinfo.lhash_lock).
 */
static inline void inet_listen_lock(struct inet_hashinfo *hashinfo)
{
	/* read_lock synchronizes us with candidate writers */
	read_lock(&hashinfo->lhash_lock);
	atomic_inc(&hashinfo->lhash_users);
	read_unlock(&hashinfo->lhash_lock);
}

static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo)
{
	if (atomic_dec_and_test(&hashinfo->lhash_users))
		wake_up(&hashinfo->lhash_wait);
}
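
/*
 * Reader-side usage sketch for the locking rules above (the helper name is
 * hypothetical): pin the listening hash with inet_listen_lock(), walk one
 * chain, then drop the pin with inet_listen_unlock() so a pending writer in
 * inet_listen_wlock() can make progress.
 */
static inline int inet_listen_chain_len(struct inet_hashinfo *hashinfo,
					const unsigned short num)
{
	const struct hlist_node *node;
	struct sock *sk;
	int len = 0;

	inet_listen_lock(hashinfo);
	sk_for_each(sk, node, &hashinfo->listening_hash[inet_lhashfn(num)])
		len++;
	inet_listen_unlock(hashinfo);
	return len;
}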

static inline void __inet_hash(struct inet_hashinfo *hashinfo,
			       struct sock *sk, const int listen_possible)
{
	struct hlist_head *list;
	rwlock_t *lock;

	BUG_TRAP(sk_unhashed(sk));
	if (listen_possible && sk->sk_state == TCP_LISTEN) {
		list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
		lock = &hashinfo->lhash_lock;
		inet_listen_wlock(hashinfo);
	} else {
		struct inet_ehash_bucket *head;
		sk->sk_hash = inet_sk_ehashfn(sk);
		head = inet_ehash_bucket(hashinfo, sk->sk_hash);
		list = &head->chain;
		lock = &head->lock;
		write_lock(lock);
	}
	__sk_add_node(sk, list);
	sock_prot_inc_use(sk->sk_prot);
	write_unlock(lock);
	if (listen_possible && sk->sk_state == TCP_LISTEN)
		wake_up(&hashinfo->lhash_wait);
}

static inline void inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		__inet_hash(hashinfo, sk, 1);
		local_bh_enable();
	}
}

static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk)
{
	rwlock_t *lock;

	if (sk_unhashed(sk))
		goto out;

	if (sk->sk_state == TCP_LISTEN) {
		local_bh_disable();
		inet_listen_wlock(hashinfo);
		lock = &hashinfo->lhash_lock;
	} else {
		lock = &inet_ehash_bucket(hashinfo, sk->sk_hash)->lock;
		write_lock_bh(lock);
	}

	if (__sk_del_node_init(sk))
		sock_prot_dec_use(sk->sk_prot);
	write_unlock_bh(lock);
out:
	if (sk->sk_state == TCP_LISTEN)
		wake_up(&hashinfo->lhash_wait);
}

extern struct sock *__inet_lookup_listener(struct inet_hashinfo *hashinfo,
					   const __be32 daddr,
					   const unsigned short hnum,
					   const int dif);

static inline struct sock *inet_lookup_listener(struct inet_hashinfo *hashinfo,
						__be32 daddr, __be16 dport, int dif)
{
	return __inet_lookup_listener(hashinfo, daddr, ntohs(dport), dif);
}

/* Socket demux engine toys. */
/* What happens here is ugly; there's a pair of adjacent fields in
   struct inet_sock; __be16 dport followed by __u16 num.  We want to
   search by pair, so we combine the keys into a single 32bit value
   and compare with 32bit value read from &...->dport.  Let's at least
   make sure that it's not mixed with anything else...
   On 64bit targets we combine comparisons with pair of adjacent __be32
   fields in the same way.
*/
typedef __u32 __bitwise __portpair;
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif
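
/*
 * Worked example of the combination above (illustrative): lookups build the
 * pair as INET_COMBINED_PORTS(sport, hnum), i.e. the wire-order remote port
 * lands in the half that overlays inet_sk(sk)->dport and the host-order
 * local port in the half that overlays inet_sk(sk)->num, so the single
 * 32-bit compare in INET_MATCH()/INET_TW_MATCH() below checks both ports at
 * once on either endianness.
 */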

#if (BITS_PER_LONG == 64)
typedef __u64 __bitwise __addrpair;
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__saddr)) << 32) | \
				   ((__force __u64)(__be32)(__daddr)));
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__daddr)) << 32) | \
				   ((__force __u64)(__be32)(__saddr)));
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
	(((__sk)->sk_hash == (__hash))				&&	\
	 ((*((__addrpair *)&(inet_sk(__sk)->daddr))) == (__cookie))	&&	\
	 ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports))	&&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
	(((__sk)->sk_hash == (__hash))				&&	\
	 ((*((__addrpair *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) &&	\
	 ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) &&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)	\
	(((__sk)->sk_hash == (__hash))				&&	\
	 (inet_sk(__sk)->daddr		== (__saddr))		&&	\
	 (inet_sk(__sk)->rcv_saddr	== (__daddr))		&&	\
	 ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports))	&&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __hash,__cookie, __saddr, __daddr, __ports, __dif)	\
	(((__sk)->sk_hash == (__hash))				&&	\
	 (inet_twsk(__sk)->tw_daddr	== (__saddr))		&&	\
	 (inet_twsk(__sk)->tw_rcv_saddr	== (__daddr))		&&	\
	 ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) &&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#endif /* 64-bit arch */

/*
 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 *
 * Local BH must be disabled here.
 */
static inline struct sock *
	__inet_lookup_established(struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);

	prefetch(head->chain.first);
	read_lock(&head->lock);
	sk_for_each(sk, node, &head->chain) {
		if (INET_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
			goto hit; /* You sunk my battleship! */
	}

	/* Must check for a TIME_WAIT'er before going to listener hash. */
	sk_for_each(sk, node, &head->twchain) {
		if (INET_TW_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
			goto hit;
	}
	sk = NULL;
out:
	read_unlock(&head->lock);
	return sk;
hit:
	sock_hold(sk);
	goto out;
}

static inline struct sock *
	inet_lookup_established(struct inet_hashinfo *hashinfo,
				const __be32 saddr, const __be16 sport,
				const __be32 daddr, const __be16 dport,
				const int dif)
{
	return __inet_lookup_established(hashinfo, saddr, sport, daddr,
					 ntohs(dport), dif);
}

static inline struct sock *__inet_lookup(struct inet_hashinfo *hashinfo,
					 const __be32 saddr, const __be16 sport,
					 const __be32 daddr, const __be16 dport,
					 const int dif)
{
	u16 hnum = ntohs(dport);
	struct sock *sk = __inet_lookup_established(hashinfo, saddr, sport, daddr,
						    hnum, dif);
	return sk ? : __inet_lookup_listener(hashinfo, daddr, hnum, dif);
}

static inline struct sock *inet_lookup(struct inet_hashinfo *hashinfo,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const __be16 dport,
				       const int dif)
{
	struct sock *sk;

	local_bh_disable();
	sk = __inet_lookup(hashinfo, saddr, sport, daddr, dport, dif);
	local_bh_enable();

	return sk;
}
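
/*
 * Caller sketch (illustrative only; the helper name is hypothetical and it
 * assumes <linux/ip.h> and <linux/tcp.h> are visible to the caller): how a
 * receive path running in softirq context, with BH already disabled, would
 * demultiplex an incoming IPv4/TCP segment.  Process-context callers would
 * use inet_lookup() above instead.
 */
static inline struct sock *inet_lookup_example(struct inet_hashinfo *hashinfo,
					       const struct iphdr *iph,
					       const struct tcphdr *th,
					       const int dif)
{
	/* Source fields identify the remote peer, destination fields us. */
	return __inet_lookup(hashinfo, iph->saddr, th->source,
			     iph->daddr, th->dest, dif);
}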

extern int inet_hash_connect(struct inet_timewait_death_row *death_row,
			     struct sock *sk);
#endif /* _INET_HASHTABLES_H */