/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H

#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/netns/hash.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * One chain is dedicated to TIME_WAIT sockets.
 * I'll experiment with dynamic table growth later.
 */
struct inet_ehash_bucket {
	struct hlist_head chain;
	struct hlist_head twchain;
};

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit, if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (ie. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)	-DaveM
 */
struct inet_bind_bucket {
	struct net		*ib_net;
	unsigned short		port;
	signed short		fastreuse;
	struct hlist_node	node;
	struct hlist_head	owners;
};
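
/*
 * Illustrative sketch, not part of this header: how a bind()-time caller
 * such as inet_csk_get_port() would keep tb->fastreuse in sync while
 * linking a new socket into a bucket, per the rules above.  The helper
 * name is hypothetical.
 */
static inline void inet_bind_bucket_update_fastreuse_example(struct inet_bind_bucket *tb,
							     struct sock *sk)
{
	if (hlist_empty(&tb->owners))
		tb->fastreuse = (sk->sk_reuse && sk->sk_state != TCP_LISTEN);
	else if (tb->fastreuse &&
		 (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
		tb->fastreuse = 0;
}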

#define inet_bind_bucket_for_each(tb, node, head) \
	hlist_for_each_entry(tb, node, head, node)

struct inet_bind_hashbucket {
	spinlock_t		lock;
	struct hlist_head	chain;
};
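
/*
 * Illustrative sketch (hypothetical helper): finding the bind bucket for a
 * local port, the way inet_csk_get_port() scans a bhash chain.  The caller
 * is assumed to hold head->lock.
 */
static inline struct inet_bind_bucket *
inet_bind_bucket_find_example(struct inet_bind_hashbucket *head,
			      struct net *net, unsigned short port)
{
	struct inet_bind_bucket *tb;
	struct hlist_node *node;

	inet_bind_bucket_for_each(tb, node, &head->chain)
		if (tb->ib_net == net && tb->port == port)
			return tb;
	return NULL;
}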

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */

struct inet_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
	 *
	 * TIME_WAIT sockets use a separate chain (twchain).
	 */
	struct inet_ehash_bucket	*ehash;
	rwlock_t			*ehash_locks;
	unsigned int			ehash_size;
	unsigned int			ehash_locks_mask;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct inet_bind_hashbucket	*bhash;

	unsigned int			bhash_size;
	/* Note : 4 bytes padding on 64 bit arches */

	/* All sockets in TCP_LISTEN state will be in here.  This is the only
	 * table where wildcard'd TCP sockets can exist.  Hash function here
	 * is just local port number.
	 */
	struct hlist_head		listening_hash[INET_LHTABLE_SIZE];

	/* All the above members are written once at bootup and
	 * never written again _or_ are predominantly read-access.
	 *
	 * Now align to a new cache line as all the following members
	 * are often dirty.
	 */
	rwlock_t			lhash_lock ____cacheline_aligned;
	atomic_t			lhash_users;
	wait_queue_head_t		lhash_wait;
	struct kmem_cache			*bind_bucket_cachep;
};

static inline struct inet_ehash_bucket *inet_ehash_bucket(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
}

static inline rwlock_t *inet_ehash_lockp(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}
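
/*
 * Illustrative sketch, not the real lookup: walking one established-hash
 * chain with the two helpers above.  Assumes the caller computed `hash`
 * (e.g. via inet_ehashfn()) and runs with local BH disabled; real lookups
 * also compare addresses and ports, not just sk_hash.
 */
static inline struct sock *inet_ehash_walk_example(struct inet_hashinfo *hashinfo,
						   unsigned int hash)
{
	struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
	rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);
	struct hlist_node *node;
	struct sock *sk;

	read_lock(lock);
	sk_for_each(sk, node, &head->chain)
		if (sk->sk_hash == hash)
			goto found;
	sk = NULL;
found:
	read_unlock(lock);
	return sk;
}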

static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int i, size = 256;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif
	if (nr_pcpus >= 4)
		size = 512;
	if (nr_pcpus >= 8)
		size = 1024;
	if (nr_pcpus >= 16)
		size = 2048;
	if (nr_pcpus >= 32)
		size = 4096;
	if (sizeof(rwlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(rwlock_t) > PAGE_SIZE)
			hashinfo->ehash_locks = vmalloc(size * sizeof(rwlock_t));
		else
#endif
		hashinfo->ehash_locks = kmalloc(size * sizeof(rwlock_t),
						GFP_KERNEL);
		if (!hashinfo->ehash_locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			rwlock_init(&hashinfo->ehash_locks[i]);
	}
	hashinfo->ehash_locks_mask = size - 1;
	return 0;
}

static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
	if (hashinfo->ehash_locks) {
#ifdef CONFIG_NUMA
		unsigned int size = (hashinfo->ehash_locks_mask + 1) *
							sizeof(rwlock_t);
		if (size > PAGE_SIZE)
			vfree(hashinfo->ehash_locks);
		else
#endif
		kfree(hashinfo->ehash_locks);
		hashinfo->ehash_locks = NULL;
	}
}
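
/*
 * Illustrative sketch: the two helpers above pair up around the lifetime of
 * the hash table; a hypothetical setup path would use them like this.
 */
static inline int inet_ehash_locks_example(struct inet_hashinfo *hashinfo)
{
	int err = inet_ehash_locks_alloc(hashinfo);	/* sized by possible CPUs */

	if (err)
		return err;
	/* ... populate and use hashinfo->ehash under the per-chain locks ... */
	inet_ehash_locks_free(hashinfo);	/* kfree() or vfree() as allocated */
	return 0;
}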

extern struct inet_bind_bucket *
		    inet_bind_bucket_create(struct kmem_cache *cachep,
					    struct net *net,
					    struct inet_bind_hashbucket *head,
					    const unsigned short snum);
extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
				     struct inet_bind_bucket *tb);

static inline int inet_bhashfn(struct net *net,
		const __u16 lport, const int bhash_size)
{
	return (lport + net_hash_mix(net)) & (bhash_size - 1);
}
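
/*
 * Illustrative sketch (hypothetical helper): mapping a local port straight
 * to its bind-hash bucket with inet_bhashfn().
 */
static inline struct inet_bind_hashbucket *
inet_bhash_bucket_example(struct inet_hashinfo *hashinfo, struct net *net,
			  __u16 lport)
{
	return &hashinfo->bhash[inet_bhashfn(net, lport, hashinfo->bhash_size)];
}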

extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
			   const unsigned short snum);

/* These can have wildcards, don't try too hard. */
static inline int inet_lhashfn(struct net *net, const unsigned short num)
{
	return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
}

static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
	return inet_lhashfn(sock_net(sk), inet_sk(sk)->num);
}

/* Caller must disable local BH processing. */
extern void __inet_inherit_port(struct sock *sk, struct sock *child);

extern void inet_put_port(struct sock *sk);

extern void inet_listen_wlock(struct inet_hashinfo *hashinfo);

/*
 * - We may sleep inside this lock.
 * - If sleeping is not required (or called from BH),
 *   use plain read_(un)lock(&inet_hashinfo.lhash_lock).
 */
static inline void inet_listen_lock(struct inet_hashinfo *hashinfo)
{
	/* read_lock synchronizes with candidate writers */
	read_lock(&hashinfo->lhash_lock);
	atomic_inc(&hashinfo->lhash_users);
	read_unlock(&hashinfo->lhash_lock);
}

static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo)
{
	if (atomic_dec_and_test(&hashinfo->lhash_users))
		wake_up(&hashinfo->lhash_wait);
}
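
/*
 * Illustrative sketch (hypothetical helper): walking one listening-hash
 * chain under the reader-side protocol above, much as the seq_file code in
 * tcp_ipv4.c does.  `slot` must be below INET_LHTABLE_SIZE.
 */
static inline unsigned int inet_count_listeners_example(struct inet_hashinfo *hashinfo,
							unsigned int slot)
{
	struct hlist_node *node;
	struct sock *sk;
	unsigned int count = 0;

	inet_listen_lock(hashinfo);
	sk_for_each(sk, node, &hashinfo->listening_hash[slot])
		count++;
	inet_listen_unlock(hashinfo);
	return count;
}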

extern void __inet_hash_nolisten(struct sock *sk);
extern void inet_hash(struct sock *sk);
extern void inet_unhash(struct sock *sk);

extern struct sock *__inet_lookup_listener(struct net *net,
					   struct inet_hashinfo *hashinfo,
					   const __be32 daddr,
					   const unsigned short hnum,
					   const int dif);

static inline struct sock *inet_lookup_listener(struct net *net,
		struct inet_hashinfo *hashinfo,
		__be32 daddr, __be16 dport, int dif)
{
	return __inet_lookup_listener(net, hashinfo, daddr, ntohs(dport), dif);
}

/* Socket demux engine toys. */
/* What happens here is ugly; there's a pair of adjacent fields in
   struct inet_sock; __be16 dport followed by __u16 num.  We want to
   search by pair, so we combine the keys into a single 32bit value
   and compare with 32bit value read from &...->dport.  Let's at least
   make sure that it's not mixed with anything else...
   On 64bit targets we combine comparisons with pair of adjacent __be32
   fields in the same way.
*/
typedef __u32 __bitwise __portpair;
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif

#if (BITS_PER_LONG == 64)
typedef __u64 __bitwise __addrpair;
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__saddr)) << 32) | \
				   ((__force __u64)(__be32)(__daddr)));
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__daddr)) << 32) | \
				   ((__force __u64)(__be32)(__saddr)));
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
	(((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net)	&&	\
	 ((*((__addrpair *)&(inet_sk(__sk)->daddr))) == (__cookie))	&&	\
	 ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports))	&&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
	(((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net)	&&	\
	 ((*((__addrpair *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) &&	\
	 ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) &&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
#define INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)	\
	(((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net)	&&	\
	 (inet_sk(__sk)->daddr		== (__saddr))		&&	\
	 (inet_sk(__sk)->rcv_saddr	== (__daddr))		&&	\
	 ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports))	&&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)	\
	(((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net)	&&	\
	 (inet_twsk(__sk)->tw_daddr	== (__saddr))		&&	\
	 (inet_twsk(__sk)->tw_rcv_saddr	== (__daddr))		&&	\
	 ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) &&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#endif /* 64-bit arch */
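
/*
 * Illustrative sketch, not the real __inet_lookup_established(): how the
 * cookies above collapse the per-entry compare to a few word compares.
 * Locking is elided; on 32-bit builds INET_ADDR_COOKIE() expands to nothing
 * and INET_MATCH() falls back to separate address compares.
 */
static inline struct sock *inet_match_walk_example(struct inet_hashinfo *hashinfo,
		struct net *net, const __be32 saddr, const __be16 sport,
		const __be32 daddr, const u16 hnum, const int dif,
		const unsigned int hash)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
	struct hlist_node *node;
	struct sock *sk;

	sk_for_each(sk, node, &head->chain)
		if (INET_MATCH(sk, net, hash, acookie, saddr, daddr, ports, dif))
			return sk;
	return NULL;
}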

/*
 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 *
 * Local BH must be disabled here.
 */
extern struct sock * __inet_lookup_established(struct net *net,
		struct inet_hashinfo *hashinfo,
		const __be32 saddr, const __be16 sport,
		const __be32 daddr, const u16 hnum, const int dif);

static inline struct sock *
	inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
				const __be32 saddr, const __be16 sport,
				const __be32 daddr, const __be16 dport,
				const int dif)
{
	return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
					 ntohs(dport), dif);
}

static inline struct sock *__inet_lookup(struct net *net,
					 struct inet_hashinfo *hashinfo,
					 const __be32 saddr, const __be16 sport,
					 const __be32 daddr, const __be16 dport,
					 const int dif)
{
	u16 hnum = ntohs(dport);
	struct sock *sk = __inet_lookup_established(net, hashinfo,
				saddr, sport, daddr, hnum, dif);

	return sk ? : __inet_lookup_listener(net, hashinfo, daddr, hnum, dif);
}

static inline struct sock *inet_lookup(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const __be16 dport,
				       const int dif)
{
	struct sock *sk;

	local_bh_disable();
	sk = __inet_lookup(net, hashinfo, saddr, sport, daddr, dport, dif);
	local_bh_enable();

	return sk;
}

static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
					     struct sk_buff *skb,
					     const __be16 sport,
					     const __be16 dport)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __inet_lookup(dev_net(skb->dst->dev), hashinfo,
			     iph->saddr, sport,
			     iph->daddr, dport, inet_iif(skb));
}
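
/*
 * Typical receive-path usage (sketch): a protocol handler with linux/tcp.h
 * available demuxes an incoming segment much as tcp_v4_rcv() does:
 *
 *	const struct tcphdr *th = tcp_hdr(skb);
 *	struct sock *sk = __inet_lookup_skb(&tcp_hashinfo, skb,
 *					    th->source, th->dest);
 */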

extern int __inet_hash_connect(struct inet_timewait_death_row *death_row,
			       struct sock *sk, u32 port_offset,
			       int (*check_established)(struct inet_timewait_death_row *,
					struct sock *, __u16,
					struct inet_timewait_sock **),
			       void (*hash)(struct sock *sk));
extern int inet_hash_connect(struct inet_timewait_death_row *death_row,
			     struct sock *sk);
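
/*
 * Typical connect-path usage (sketch): an AF_INET stream protocol binds the
 * ephemeral port during connect() much as tcp_v4_connect() does:
 *
 *	err = inet_hash_connect(&tcp_death_row, sk);
 */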
#endif /* _INET_HASHTABLES_H */