Commit 6e04e021, authored by: Arnaldo Carvalho de Melo, committed by: David S. Miller

[INET]: Move tcp_port_rover to inet_hashinfo

Also expose all of the tcp_hashinfo members, i.e. kill the tcp_ehash,
etc. macros. This more clearly exposes the functions that are already
generic, and some that need just a bit of work to become generic, as
we'll see in the upcoming changesets.

Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>

Parent: 2d8c4ce5

--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -117,6 +117,7 @@ struct inet_hashinfo {
 	wait_queue_head_t	lhash_wait;
 	spinlock_t		portalloc_lock;
 	kmem_cache_t		*bind_bucket_cachep;
+	int			port_rover;
 };
 
 static inline int inet_ehashfn(const __u32 laddr, const __u16 lport,
...
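For orientation, here is a rough sketch of the consolidated structure this series builds up. It is assembled from this commit's hunks and the macros removed below, not copied verbatim from the tree, and the field order and the listening-table declaration are abbreviated:

/* Rough sketch only; see include/net/inet_hashtables.h for the
 * authoritative definition. */
struct inet_hashinfo {
	struct inet_ehash_bucket	*ehash;		/* established half + TIME_WAIT twin half */
	struct inet_bind_hashbucket	*bhash;		/* bound local ports */
	int				ehash_size;
	int				bhash_size;
	struct hlist_head		listening_hash[INET_LHTABLE_SIZE];
	rwlock_t			lhash_lock;	/* guards listening_hash */
	atomic_t			lhash_users;	/* sleeping readers, see tcp_listen_lock() */
	wait_queue_head_t		lhash_wait;
	spinlock_t			portalloc_lock;	/* serializes port_rover updates */
	kmem_cache_t			*bind_bucket_cachep;
	int				port_rover;	/* moved here by this patch */
};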
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -136,7 +136,7 @@ struct sock_common {
   *	@sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets
   *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
   *	@sk_lingertime: %SO_LINGER l_linger setting
-  *	@sk_hashent: hash entry in several tables (e.g. tcp_ehash)
+  *	@sk_hashent: hash entry in several tables (e.g. inet_hashinfo.ehash)
   *	@sk_backlog: always used with the per-socket spinlock held
   *	@sk_callback_lock: used with the callbacks in the end of this struct
   *	@sk_error_queue: rarely used
...
...@@ -41,19 +41,7 @@ ...@@ -41,19 +41,7 @@
#endif #endif
#include <linux/seq_file.h> #include <linux/seq_file.h>
extern struct inet_hashinfo tcp_hashinfo; extern struct inet_hashinfo tcp_hashinfo;
#define tcp_ehash (tcp_hashinfo.ehash)
#define tcp_bhash (tcp_hashinfo.bhash)
#define tcp_ehash_size (tcp_hashinfo.ehash_size)
#define tcp_bhash_size (tcp_hashinfo.bhash_size)
#define tcp_listening_hash (tcp_hashinfo.listening_hash)
#define tcp_lhash_lock (tcp_hashinfo.lhash_lock)
#define tcp_lhash_users (tcp_hashinfo.lhash_users)
#define tcp_lhash_wait (tcp_hashinfo.lhash_wait)
#define tcp_portalloc_lock (tcp_hashinfo.portalloc_lock)
#define tcp_bucket_cachep (tcp_hashinfo.bind_bucket_cachep)
extern int tcp_port_rover;
#if (BITS_PER_LONG == 64) #if (BITS_PER_LONG == 64)
#define TCP_ADDRCMP_ALIGN_BYTES 8 #define TCP_ADDRCMP_ALIGN_BYTES 8
...@@ -1463,21 +1451,21 @@ extern void tcp_listen_wlock(void); ...@@ -1463,21 +1451,21 @@ extern void tcp_listen_wlock(void);
/* - We may sleep inside this lock. /* - We may sleep inside this lock.
* - If sleeping is not required (or called from BH), * - If sleeping is not required (or called from BH),
* use plain read_(un)lock(&tcp_lhash_lock). * use plain read_(un)lock(&inet_hashinfo.lhash_lock).
*/ */
static inline void tcp_listen_lock(void) static inline void tcp_listen_lock(void)
{ {
/* read_lock synchronizes to candidates to writers */ /* read_lock synchronizes to candidates to writers */
read_lock(&tcp_lhash_lock); read_lock(&tcp_hashinfo.lhash_lock);
atomic_inc(&tcp_lhash_users); atomic_inc(&tcp_hashinfo.lhash_users);
read_unlock(&tcp_lhash_lock); read_unlock(&tcp_hashinfo.lhash_lock);
} }
static inline void tcp_listen_unlock(void) static inline void tcp_listen_unlock(void)
{ {
if (atomic_dec_and_test(&tcp_lhash_users)) if (atomic_dec_and_test(&tcp_hashinfo.lhash_users))
wake_up(&tcp_lhash_wait); wake_up(&tcp_hashinfo.lhash_wait);
} }
static inline int keepalive_intvl_when(const struct tcp_sock *tp) static inline int keepalive_intvl_when(const struct tcp_sock *tp)
......
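The lhash trio above implements a reader count rather than a plain rwlock: tcp_listen_lock() holds lhash_lock only long enough to bump lhash_users, so a reader may then sleep while it walks the listening table, and the writer (tcp_listen_wlock(), further down in this patch) waits for the count to drain. A hedged usage sketch; the loop body and the sk/node/hnum variables are hypothetical caller context:

	tcp_listen_lock();	/* pin the listening hash; we may sleep below */
	sk_for_each(sk, node, &tcp_hashinfo.listening_hash[inet_lhashfn(hnum)]) {
		/* ... inspect listening sockets ... */
	}
	tcp_listen_unlock();	/* last reader out wakes tcp_listen_wlock() */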
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2257,11 +2257,11 @@ void __init tcp_init(void)
 	__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
 				   sizeof(skb->cb));
 
-	tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket",
-					      sizeof(struct inet_bind_bucket),
-					      0, SLAB_HWCACHE_ALIGN,
-					      NULL, NULL);
-	if (!tcp_bucket_cachep)
+	tcp_hashinfo.bind_bucket_cachep =
+		kmem_cache_create("tcp_bind_bucket",
+				  sizeof(struct inet_bind_bucket), 0,
+				  SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!tcp_hashinfo.bind_bucket_cachep)
 		panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");
 
 	tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket",
@@ -2276,7 +2276,7 @@ void __init tcp_init(void)
 	 *
 	 * The methodology is similar to that of the buffer cache.
 	 */
-	tcp_ehash =
+	tcp_hashinfo.ehash =
 		alloc_large_system_hash("TCP established",
 					sizeof(struct inet_ehash_bucket),
 					thash_entries,
@@ -2284,37 +2284,37 @@ void __init tcp_init(void)
 					(25 - PAGE_SHIFT) :
 					(27 - PAGE_SHIFT),
 					HASH_HIGHMEM,
-					&tcp_ehash_size,
+					&tcp_hashinfo.ehash_size,
 					NULL,
 					0);
-	tcp_ehash_size = (1 << tcp_ehash_size) >> 1;
-	for (i = 0; i < (tcp_ehash_size << 1); i++) {
-		rwlock_init(&tcp_ehash[i].lock);
-		INIT_HLIST_HEAD(&tcp_ehash[i].chain);
+	tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
+	for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
+		rwlock_init(&tcp_hashinfo.ehash[i].lock);
+		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
 	}
 
-	tcp_bhash =
+	tcp_hashinfo.bhash =
 		alloc_large_system_hash("TCP bind",
 					sizeof(struct inet_bind_hashbucket),
-					tcp_ehash_size,
+					tcp_hashinfo.ehash_size,
 					(num_physpages >= 128 * 1024) ?
 					(25 - PAGE_SHIFT) :
 					(27 - PAGE_SHIFT),
 					HASH_HIGHMEM,
-					&tcp_bhash_size,
+					&tcp_hashinfo.bhash_size,
 					NULL,
 					64 * 1024);
-	tcp_bhash_size = 1 << tcp_bhash_size;
-	for (i = 0; i < tcp_bhash_size; i++) {
-		spin_lock_init(&tcp_bhash[i].lock);
-		INIT_HLIST_HEAD(&tcp_bhash[i].chain);
+	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
+	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
+		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
+		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
 	}
 
 	/* Try to be a bit smarter and adjust defaults depending
 	 * on available memory.
 	 */
 	for (order = 0; ((1 << order) << PAGE_SHIFT) <
-			(tcp_bhash_size * sizeof(struct inet_bind_hashbucket));
+			(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
 			order++)
 		;
 	if (order >= 4) {
@@ -2329,7 +2329,7 @@ void __init tcp_init(void)
 		sysctl_tcp_max_orphans >>= (3 - order);
 		sysctl_max_syn_backlog = 128;
 	}
-	tcp_port_rover = sysctl_local_port_range[0] - 1;
+	tcp_hashinfo.port_rover = sysctl_local_port_range[0] - 1;
 
 	sysctl_tcp_mem[0] =  768 << order;
 	sysctl_tcp_mem[1] = 1024 << order;
@@ -2344,7 +2344,7 @@ void __init tcp_init(void)
 	printk(KERN_INFO "TCP: Hash tables configured "
 	       "(established %d bind %d)\n",
-	       tcp_ehash_size << 1, tcp_bhash_size);
+	       tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);
 
 	tcp_register_congestion_control(&tcp_reno);
 }
...
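One line in tcp_init() above is easy to misread: alloc_large_system_hash() hands back the log2 of the allocated bucket count through &tcp_hashinfo.ehash_size, and the `(1 << ehash_size) >> 1` halving reserves the upper half of the table for TIME_WAIT sockets, which is what the `ehash[i + ehash_size]` accesses elsewhere in this patch index into. The arithmetic, with an illustrative value (20 is made up, not a kernel default):

	tcp_hashinfo.ehash_size = 20;	/* as written back by the allocator */
	tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;	/* 2^20 buckets -> 524288 per half */
	/* buckets [0, ehash_size)              hold established sockets,
	 * buckets [ehash_size, 2 * ehash_size) hold their TIME_WAIT twins,
	 * hence the init loop above runs to (ehash_size << 1). */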
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -595,7 +595,7 @@ static int tcpdiag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 			struct hlist_node *node;
 
 			num = 0;
-			sk_for_each(sk, node, &tcp_listening_hash[i]) {
+			sk_for_each(sk, node, &tcp_hashinfo.listening_hash[i]) {
 				struct inet_sock *inet = inet_sk(sk);
 
 				if (num < s_num) {
@@ -645,8 +645,8 @@ static int tcpdiag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	if (!(r->tcpdiag_states&~(TCPF_LISTEN|TCPF_SYN_RECV)))
 		return skb->len;
 
-	for (i = s_i; i < tcp_ehash_size; i++) {
-		struct inet_ehash_bucket *head = &tcp_ehash[i];
+	for (i = s_i; i < tcp_hashinfo.ehash_size; i++) {
+		struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[i];
 		struct sock *sk;
 		struct hlist_node *node;
@@ -678,7 +678,7 @@ static int tcpdiag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		if (r->tcpdiag_states&TCPF_TIME_WAIT) {
 			sk_for_each(sk, node,
-				    &tcp_ehash[i + tcp_ehash_size].chain) {
+				    &tcp_hashinfo.ehash[i + tcp_hashinfo.ehash_size].chain) {
 				struct inet_sock *inet = inet_sk(sk);
 
 				if (num < s_num)
...
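tcpdiag_dump() above is a restartable netlink dump: each callback round re-walks the buckets and skips the first s_num sockets, which earlier rounds already reported, before emitting more. A minimal sketch of that skip pattern (standalone and illustrative; emit() is a made-up stand-in for the real fill routine):

	num = 0;
	sk_for_each(sk, node, &tcp_hashinfo.listening_hash[i]) {
		if (num < s_num) {	/* already dumped in an earlier round */
			num++;
			continue;
		}
		if (emit(sk) < 0)	/* hypothetical: stop when the skb is full */
			goto done;
		num++;
	}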
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -94,6 +94,7 @@ struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
 	.lhash_users	= ATOMIC_INIT(0),
 	.lhash_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
 	.portalloc_lock	= SPIN_LOCK_UNLOCKED,
+	.port_rover	= 1024 - 1,
 };
 
 /*
@@ -102,7 +103,6 @@ struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
  * 32768-61000
  */
 int sysctl_local_port_range[2] = { 1024, 4999 };
-int tcp_port_rover = 1024 - 1;
 
 static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
 {
@@ -146,16 +146,16 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 		int remaining = (high - low) + 1;
 		int rover;
 
-		spin_lock(&tcp_portalloc_lock);
-		if (tcp_port_rover < low)
+		spin_lock(&tcp_hashinfo.portalloc_lock);
+		if (tcp_hashinfo.port_rover < low)
 			rover = low;
 		else
-			rover = tcp_port_rover;
+			rover = tcp_hashinfo.port_rover;
 		do {
 			rover++;
 			if (rover > high)
 				rover = low;
-			head = &tcp_bhash[inet_bhashfn(rover, tcp_bhash_size)];
+			head = &tcp_hashinfo.bhash[inet_bhashfn(rover, tcp_hashinfo.bhash_size)];
 			spin_lock(&head->lock);
 			inet_bind_bucket_for_each(tb, node, &head->chain)
 				if (tb->port == rover)
@@ -164,8 +164,8 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 		next:
 			spin_unlock(&head->lock);
 		} while (--remaining > 0);
-		tcp_port_rover = rover;
-		spin_unlock(&tcp_portalloc_lock);
+		tcp_hashinfo.port_rover = rover;
+		spin_unlock(&tcp_hashinfo.portalloc_lock);
 
 		/* Exhausted local port range during search?  It is not
 		 * possible for us to be holding one of the bind hash
@@ -182,7 +182,7 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 		 */
 		snum = rover;
 	} else {
-		head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
+		head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
 		spin_lock(&head->lock);
 		inet_bind_bucket_for_each(tb, node, &head->chain)
 			if (tb->port == snum)
@@ -205,7 +205,7 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 	}
 tb_not_found:
 	ret = 1;
-	if (!tb && (tb = inet_bind_bucket_create(tcp_bucket_cachep, head, snum)) == NULL)
+	if (!tb && (tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, snum)) == NULL)
 		goto fail_unlock;
 	if (hlist_empty(&tb->owners)) {
 		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
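The search loop above encodes the ephemeral-port policy that this patch moves into tcp_hashinfo: remember the last port handed out, resume one past it, and wrap at the top of the sysctl range. A standalone sketch of that policy (illustrative, not kernel code; in the real function the caller holds portalloc_lock around the whole search):

static int next_candidate(int rover, int low, int high)
{
	if (rover < low)	/* range may have been raised via sysctl */
		rover = low;
	if (++rover > high)	/* walk upward, wrapping at the top */
		rover = low;
	return rover;
}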
@@ -237,22 +237,22 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 void tcp_listen_wlock(void)
 {
-	write_lock(&tcp_lhash_lock);
+	write_lock(&tcp_hashinfo.lhash_lock);
 
-	if (atomic_read(&tcp_lhash_users)) {
+	if (atomic_read(&tcp_hashinfo.lhash_users)) {
 		DEFINE_WAIT(wait);
 
 		for (;;) {
-			prepare_to_wait_exclusive(&tcp_lhash_wait,
+			prepare_to_wait_exclusive(&tcp_hashinfo.lhash_wait,
 						  &wait, TASK_UNINTERRUPTIBLE);
-			if (!atomic_read(&tcp_lhash_users))
+			if (!atomic_read(&tcp_hashinfo.lhash_users))
 				break;
-			write_unlock_bh(&tcp_lhash_lock);
+			write_unlock_bh(&tcp_hashinfo.lhash_lock);
 			schedule();
-			write_lock_bh(&tcp_lhash_lock);
+			write_lock_bh(&tcp_hashinfo.lhash_lock);
 		}
 
-		finish_wait(&tcp_lhash_wait, &wait);
+		finish_wait(&tcp_hashinfo.lhash_wait, &wait);
 	}
 }
@@ -263,20 +263,20 @@ static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
 	BUG_TRAP(sk_unhashed(sk));
 	if (listen_possible && sk->sk_state == TCP_LISTEN) {
-		list = &tcp_listening_hash[inet_sk_listen_hashfn(sk)];
-		lock = &tcp_lhash_lock;
+		list = &tcp_hashinfo.listening_hash[inet_sk_listen_hashfn(sk)];
+		lock = &tcp_hashinfo.lhash_lock;
 		tcp_listen_wlock();
 	} else {
-		sk->sk_hashent = inet_sk_ehashfn(sk, tcp_ehash_size);
-		list = &tcp_ehash[sk->sk_hashent].chain;
-		lock = &tcp_ehash[sk->sk_hashent].lock;
+		sk->sk_hashent = inet_sk_ehashfn(sk, tcp_hashinfo.ehash_size);
+		list = &tcp_hashinfo.ehash[sk->sk_hashent].chain;
+		lock = &tcp_hashinfo.ehash[sk->sk_hashent].lock;
 		write_lock(lock);
 	}
 	__sk_add_node(sk, list);
 	sock_prot_inc_use(sk->sk_prot);
 	write_unlock(lock);
 	if (listen_possible && sk->sk_state == TCP_LISTEN)
-		wake_up(&tcp_lhash_wait);
+		wake_up(&tcp_hashinfo.lhash_wait);
 }
 
 static void tcp_v4_hash(struct sock *sk)
@@ -298,9 +298,9 @@ void tcp_unhash(struct sock *sk)
 	if (sk->sk_state == TCP_LISTEN) {
 		local_bh_disable();
 		tcp_listen_wlock();
-		lock = &tcp_lhash_lock;
+		lock = &tcp_hashinfo.lhash_lock;
 	} else {
-		struct inet_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
+		struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[sk->sk_hashent];
 		lock = &head->lock;
 		write_lock_bh(&head->lock);
 	}
@@ -311,7 +311,7 @@ void tcp_unhash(struct sock *sk)
 ende:
 	if (sk->sk_state == TCP_LISTEN)
-		wake_up(&tcp_lhash_wait);
+		wake_up(&tcp_hashinfo.lhash_wait);
 }
 
 /* Don't inline this cruft.  Here are some nice properties to
@@ -366,8 +366,8 @@ static inline struct sock *tcp_v4_lookup_listener(const u32 daddr,
 	struct sock *sk = NULL;
 	struct hlist_head *head;
 
-	read_lock(&tcp_lhash_lock);
-	head = &tcp_listening_hash[inet_lhashfn(hnum)];
+	read_lock(&tcp_hashinfo.lhash_lock);
+	head = &tcp_hashinfo.listening_hash[inet_lhashfn(hnum)];
 	if (!hlist_empty(head)) {
 		struct inet_sock *inet = inet_sk((sk = __sk_head(head)));
@@ -382,7 +382,7 @@ static inline struct sock *tcp_v4_lookup_listener(const u32 daddr,
 sherry_cache:
 		sock_hold(sk);
 	}
-	read_unlock(&tcp_lhash_lock);
+	read_unlock(&tcp_hashinfo.lhash_lock);
 	return sk;
 }
@@ -406,8 +406,8 @@ static inline struct sock *__tcp_v4_lookup_established(const u32 saddr,
 	/* Optimize here for direct hit, only listening connections can
 	 * have wildcards anyways.
 	 */
-	const int hash = inet_ehashfn(daddr, hnum, saddr, sport, tcp_ehash_size);
-	head = &tcp_ehash[hash];
+	const int hash = inet_ehashfn(daddr, hnum, saddr, sport, tcp_hashinfo.ehash_size);
+	head = &tcp_hashinfo.ehash[hash];
 	read_lock(&head->lock);
 	sk_for_each(sk, node, &head->chain) {
 		if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
@@ -415,7 +415,7 @@ static inline struct sock *__tcp_v4_lookup_established(const u32 saddr,
 	}
 
 	/* Must check for a TIME_WAIT'er before going to listener hash. */
-	sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
+	sk_for_each(sk, node, &(head + tcp_hashinfo.ehash_size)->chain) {
 		if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
 			goto hit;
 	}
@@ -469,8 +469,8 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
 	int dif = sk->sk_bound_dev_if;
 	TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
 	__u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
-	const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_ehash_size);
-	struct inet_ehash_bucket *head = &tcp_ehash[hash];
+	const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_hashinfo.ehash_size);
+	struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
 	struct sock *sk2;
 	struct hlist_node *node;
 	struct tcp_tw_bucket *tw;
@@ -478,7 +478,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
 	write_lock(&head->lock);
 
 	/* Check TIME-WAIT sockets first. */
-	sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
+	sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
 		tw = (struct tcp_tw_bucket *)sk2;
 
 		if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
@@ -582,7 +582,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
 		local_bh_disable();
 		for (i = 1; i <= range; i++) {
 			port = low + (i + offset) % range;
-			head = &tcp_bhash[inet_bhashfn(port, tcp_bhash_size)];
+			head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)];
 			spin_lock(&head->lock);
 
 			/* Does not bother with rcv_saddr checks,
@@ -602,7 +602,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
 				}
 			}
 
-			tb = inet_bind_bucket_create(tcp_bucket_cachep, head, port);
+			tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port);
 			if (!tb) {
 				spin_unlock(&head->lock);
 				break;
@@ -637,7 +637,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
 		goto out;
 	}
 
-	head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
+	head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
 	tb = inet_sk(sk)->bind_hash;
 	spin_lock_bh(&head->lock);
 	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
@@ -1926,7 +1926,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
 
 	if (!sk) {
 		st->bucket = 0;
-		sk = sk_head(&tcp_listening_hash[0]);
+		sk = sk_head(&tcp_hashinfo.listening_hash[0]);
 		goto get_sk;
 	}
@@ -1980,7 +1980,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
 		read_unlock_bh(&tp->accept_queue.syn_wait_lock);
 	}
 	if (++st->bucket < INET_LHTABLE_SIZE) {
-		sk = sk_head(&tcp_listening_hash[st->bucket]);
+		sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
 		goto get_sk;
 	}
 	cur = NULL;
@@ -2004,7 +2004,7 @@ static void *established_get_first(struct seq_file *seq)
 	struct tcp_iter_state* st = seq->private;
 	void *rc = NULL;
 
-	for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) {
+	for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
 		struct sock *sk;
 		struct hlist_node *node;
 		struct tcp_tw_bucket *tw;
@@ -2012,8 +2012,8 @@ static void *established_get_first(struct seq_file *seq)
 		/* We can reschedule _before_ having picked the target: */
 		cond_resched_softirq();
 
-		read_lock(&tcp_ehash[st->bucket].lock);
-		sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
+		read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
+		sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
 			if (sk->sk_family != st->family) {
 				continue;
 			}
@@ -2022,14 +2022,14 @@ static void *established_get_first(struct seq_file *seq)
 		}
 		st->state = TCP_SEQ_STATE_TIME_WAIT;
 		tw_for_each(tw, node,
-			    &tcp_ehash[st->bucket + tcp_ehash_size].chain) {
+			    &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
 			if (tw->tw_family != st->family) {
 				continue;
 			}
 			rc = tw;
 			goto out;
 		}
-		read_unlock(&tcp_ehash[st->bucket].lock);
+		read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
 		st->state = TCP_SEQ_STATE_ESTABLISHED;
 	}
@@ -2056,15 +2056,15 @@ static void *established_get_next(struct seq_file *seq, void *cur)
 			cur = tw;
 			goto out;
 		}
-		read_unlock(&tcp_ehash[st->bucket].lock);
+		read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
 		st->state = TCP_SEQ_STATE_ESTABLISHED;
 
 		/* We can reschedule between buckets: */
 		cond_resched_softirq();
 
-		if (++st->bucket < tcp_ehash_size) {
-			read_lock(&tcp_ehash[st->bucket].lock);
-			sk = sk_head(&tcp_ehash[st->bucket].chain);
+		if (++st->bucket < tcp_hashinfo.ehash_size) {
+			read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
+			sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
 		} else {
 			cur = NULL;
 			goto out;
@@ -2078,7 +2078,7 @@ static void *established_get_next(struct seq_file *seq, void *cur)
 	}
 	st->state = TCP_SEQ_STATE_TIME_WAIT;
-	tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain);
+	tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain);
 	goto get_tw;
 found:
 	cur = sk;
@@ -2173,7 +2173,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
 	case TCP_SEQ_STATE_TIME_WAIT:
 	case TCP_SEQ_STATE_ESTABLISHED:
 		if (v)
-			read_unlock(&tcp_ehash[st->bucket].lock);
+			read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
 		local_bh_enable();
 		break;
 	}
@@ -2432,7 +2432,6 @@ EXPORT_SYMBOL(ipv4_specific);
 EXPORT_SYMBOL(inet_bind_bucket_create);
 EXPORT_SYMBOL(tcp_hashinfo);
 EXPORT_SYMBOL(tcp_listen_wlock);
-EXPORT_SYMBOL(tcp_port_rover);
 EXPORT_SYMBOL(tcp_prot);
 EXPORT_SYMBOL(tcp_unhash);
 EXPORT_SYMBOL(tcp_v4_conn_request);
...
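Note the export change at the end of the file: EXPORT_SYMBOL(tcp_port_rover) disappears, and modular code is presumably expected to reach the rover through the still-exported tcp_hashinfo instead. A hedged sketch of what such a caller would now write (hypothetical module-side code, not from this patch):

	int last_port;

	spin_lock(&tcp_hashinfo.portalloc_lock);
	last_port = tcp_hashinfo.port_rover;	/* the rover lives in tcp_hashinfo now */
	spin_unlock(&tcp_hashinfo.portalloc_lock);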
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -60,12 +60,11 @@ int tcp_tw_count;
 /* Must be called with locally disabled BHs. */
 static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
 {
-	struct inet_ehash_bucket *ehead;
 	struct inet_bind_hashbucket *bhead;
 	struct inet_bind_bucket *tb;
-
 	/* Unlink from established hashes. */
-	ehead = &tcp_ehash[tw->tw_hashent];
+	struct inet_ehash_bucket *ehead = &tcp_hashinfo.ehash[tw->tw_hashent];
+
 	write_lock(&ehead->lock);
 	if (hlist_unhashed(&tw->tw_node)) {
 		write_unlock(&ehead->lock);
@@ -76,12 +75,12 @@ static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
 	write_unlock(&ehead->lock);
 
 	/* Disassociate with bind bucket. */
-	bhead = &tcp_bhash[inet_bhashfn(tw->tw_num, tcp_bhash_size)];
+	bhead = &tcp_hashinfo.bhash[inet_bhashfn(tw->tw_num, tcp_hashinfo.bhash_size)];
 	spin_lock(&bhead->lock);
 	tb = tw->tw_tb;
 	__hlist_del(&tw->tw_bind_node);
 	tw->tw_tb = NULL;
-	inet_bind_bucket_destroy(tcp_bucket_cachep, tb);
+	inet_bind_bucket_destroy(tcp_hashinfo.bind_bucket_cachep, tb);
 	spin_unlock(&bhead->lock);
 
 #ifdef SOCK_REFCNT_DEBUG
@@ -297,13 +296,13 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
 static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
 {
 	const struct inet_sock *inet = inet_sk(sk);
-	struct inet_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent];
+	struct inet_ehash_bucket *ehead = &tcp_hashinfo.ehash[sk->sk_hashent];
 	struct inet_bind_hashbucket *bhead;
 	/* Step 1: Put TW into bind hash. Original socket stays there too.
 	   Note, that any socket with inet->num != 0 MUST be bound in
 	   binding cache, even if it is closed.
 	 */
-	bhead = &tcp_bhash[inet_bhashfn(inet->num, tcp_bhash_size)];
+	bhead = &tcp_hashinfo.bhash[inet_bhashfn(inet->num, tcp_hashinfo.bhash_size)];
 	spin_lock(&bhead->lock);
 	tw->tw_tb = inet->bind_hash;
 	BUG_TRAP(inet->bind_hash);
@@ -317,7 +316,7 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
 	sock_prot_dec_use(sk->sk_prot);
 
 	/* Step 3: Hash TW into TIMEWAIT half of established hash table. */
-	tw_add_node(tw, &(ehead + tcp_ehash_size)->chain);
+	tw_add_node(tw, &(ehead + tcp_hashinfo.ehash_size)->chain);
 	atomic_inc(&tw->tw_refcnt);
 
 	write_unlock(&ehead->lock);
...
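Both the TIME_WAIT teardown above and the port allocators index the bind table with inet_bhashfn(). Since tcp_init() forces bhash_size to a power of two, the function can be a plain mask; this is a sketch of its assumed shape from this patch series, with include/net/inet_hashtables.h being the authoritative definition:

static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
{
	return lport & (bhash_size - 1);	/* bhash_size is a power of two */
}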
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -84,7 +84,7 @@ static __inline__ int tcp_v6_hashfn(struct in6_addr *laddr, u16 lport,
 	hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
 	hashent ^= hashent>>16;
 	hashent ^= hashent>>8;
-	return (hashent & (tcp_ehash_size - 1));
+	return (hashent & (tcp_hashinfo.ehash_size - 1));
 }
 
 static __inline__ int tcp_v6_sk_hashfn(struct sock *sk)
@@ -138,15 +138,15 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
 		int remaining = (high - low) + 1;
 		int rover;
 
-		spin_lock(&tcp_portalloc_lock);
-		if (tcp_port_rover < low)
+		spin_lock(&tcp_hashinfo.portalloc_lock);
+		if (tcp_hashinfo.port_rover < low)
 			rover = low;
 		else
-			rover = tcp_port_rover;
+			rover = tcp_hashinfo.port_rover;
 		do {	rover++;
 			if (rover > high)
 				rover = low;
-			head = &tcp_bhash[inet_bhashfn(rover, tcp_bhash_size)];
+			head = &tcp_hashinfo.bhash[inet_bhashfn(rover, tcp_hashinfo.bhash_size)];
 			spin_lock(&head->lock);
 			inet_bind_bucket_for_each(tb, node, &head->chain)
 				if (tb->port == rover)
@@ -155,8 +155,8 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
 		next:
 			spin_unlock(&head->lock);
 		} while (--remaining > 0);
-		tcp_port_rover = rover;
-		spin_unlock(&tcp_portalloc_lock);
+		tcp_hashinfo.port_rover = rover;
+		spin_unlock(&tcp_hashinfo.portalloc_lock);
 
 		/* Exhausted local port range during search?  It is not
 		 * possible for us to be holding one of the bind hash
@@ -171,7 +171,7 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
 		/* OK, here is the one we will use. */
 		snum = rover;
 	} else {
-		head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
+		head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
 		spin_lock(&head->lock);
 		inet_bind_bucket_for_each(tb, node, &head->chain)
 			if (tb->port == snum)
@@ -192,8 +192,11 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
 	}
 tb_not_found:
 	ret = 1;
-	if (!tb && (tb = inet_bind_bucket_create(tcp_bucket_cachep, head, snum)) == NULL)
-		goto fail_unlock;
+	if (tb == NULL) {
+		tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, snum);
+		if (tb == NULL)
+			goto fail_unlock;
+	}
 	if (hlist_empty(&tb->owners)) {
 		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
 			tb->fastreuse = 1;
@@ -224,13 +227,13 @@ static __inline__ void __tcp_v6_hash(struct sock *sk)
 	BUG_TRAP(sk_unhashed(sk));
 
 	if (sk->sk_state == TCP_LISTEN) {
-		list = &tcp_listening_hash[inet_sk_listen_hashfn(sk)];
-		lock = &tcp_lhash_lock;
+		list = &tcp_hashinfo.listening_hash[inet_sk_listen_hashfn(sk)];
+		lock = &tcp_hashinfo.lhash_lock;
 		tcp_listen_wlock();
 	} else {
 		sk->sk_hashent = tcp_v6_sk_hashfn(sk);
-		list = &tcp_ehash[sk->sk_hashent].chain;
-		lock = &tcp_ehash[sk->sk_hashent].lock;
+		list = &tcp_hashinfo.ehash[sk->sk_hashent].chain;
+		lock = &tcp_hashinfo.ehash[sk->sk_hashent].lock;
 		write_lock(lock);
 	}
@@ -263,8 +266,8 @@ static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned short hnum, const int dif)
 	int score, hiscore;
 
 	hiscore=0;
-	read_lock(&tcp_lhash_lock);
-	sk_for_each(sk, node, &tcp_listening_hash[inet_lhashfn(hnum)]) {
+	read_lock(&tcp_hashinfo.lhash_lock);
+	sk_for_each(sk, node, &tcp_hashinfo.listening_hash[inet_lhashfn(hnum)]) {
 		if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) {
 			struct ipv6_pinfo *np = inet6_sk(sk);
@@ -291,7 +294,7 @@ static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned short hnum, const int dif)
 	}
 	if (result)
 		sock_hold(result);
-	read_unlock(&tcp_lhash_lock);
+	read_unlock(&tcp_hashinfo.lhash_lock);
 	return result;
 }
@@ -315,7 +318,7 @@ static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u16 sport,
 	 * have wildcards anyways.
 	 */
 	hash = tcp_v6_hashfn(daddr, hnum, saddr, sport);
-	head = &tcp_ehash[hash];
+	head = &tcp_hashinfo.ehash[hash];
 	read_lock(&head->lock);
 	sk_for_each(sk, node, &head->chain) {
 		/* For IPV6 do the cheaper port and family tests first. */
@@ -323,7 +326,7 @@ static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u16 sport,
 			goto hit; /* You sunk my battleship! */
 	}
 	/* Must check for a TIME_WAIT'er before going to listener hash. */
-	sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
+	sk_for_each(sk, node, &(head + tcp_hashinfo.ehash_size)->chain) {
 		/* FIXME: acme: check this... */
 		struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
@@ -461,7 +464,7 @@ static int __tcp_v6_check_established(struct sock *sk, __u16 lport,
 	int dif = sk->sk_bound_dev_if;
 	u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
 	int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport);
-	struct inet_ehash_bucket *head = &tcp_ehash[hash];
+	struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
 	struct sock *sk2;
 	struct hlist_node *node;
 	struct tcp_tw_bucket *tw;
@@ -469,7 +472,7 @@ static int __tcp_v6_check_established(struct sock *sk, __u16 lport,
 	write_lock(&head->lock);
 
 	/* Check TIME-WAIT sockets first. */
-	sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
+	sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
 		tw = (struct tcp_tw_bucket*)sk2;
 
 		if(*((__u32 *)&(tw->tw_dport))	== ports	&&
@@ -558,7 +561,7 @@ static int tcp_v6_hash_connect(struct sock *sk)
 		local_bh_disable();
 		for (i = 1; i <= range; i++) {
 			port = low + (i + offset) % range;
-			head = &tcp_bhash[inet_bhashfn(port, tcp_bhash_size)];
+			head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)];
 			spin_lock(&head->lock);
 
 			/* Does not bother with rcv_saddr checks,
@@ -578,7 +581,7 @@ static int tcp_v6_hash_connect(struct sock *sk)
 				}
 			}
 
-			tb = inet_bind_bucket_create(tcp_bucket_cachep, head, port);
+			tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port);
 			if (!tb) {
 				spin_unlock(&head->lock);
 				break;
@@ -613,7 +616,7 @@ static int tcp_v6_hash_connect(struct sock *sk)
 		goto out;
 	}
 
-	head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
+	head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
 	tb = inet_sk(sk)->bind_hash;
 	spin_lock_bh(&head->lock);
...