Commit 152da81d authored by Pavel Emelyanov, committed by David S. Miller

[INET]: Uninline the __inet_hash function.

This one is used in quite a few places in the networking code and
seems too big to be inlined.

After the patch net/ipv4/built-in.o loses ~650 bytes:
add/remove: 2/0 grow/shrink: 0/5 up/down: 461/-1114 (-653)
function                                     old     new   delta
__inet_hash_nolisten                           -     282    +282
__inet_hash                                    -     179    +179
tcp_sacktag_write_queue                     2255    2254      -1
__inet_lookup_listener                       284     274     -10
tcp_v4_syn_recv_sock                         755     493    -262
tcp_v4_hash                                  389      35    -354
inet_hash_connect                           1086     599    -487

This version addresses the issue pointed out by Eric: while inline,
this function was optimized by gcc with respect to the
'listen_possible' argument.
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent d6701191
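For readers unfamiliar with the 'listen_possible' point above: while the old __inet_hash() was inline, gcc propagated the constant 0 or 1 passed at each call site into the body and deleted the dead branch, so uninlining it naively would have added a useless listen check to the hot, never-listening paths. The patch therefore splits it into an out-of-line __inet_hash() and __inet_hash_nolisten(). A minimal user-space sketch of that compiler behaviour (illustrative only, not kernel code; all names here are made up):

#include <stdio.h>

/* Stand-in for the old inline __inet_hash(): the first argument is a
 * compile-time constant at every call site. */
static inline void hash_sock(int listen_possible, int is_listening)
{
	if (listen_possible && is_listening)
		puts("listening hash: lhash_lock + wake_up");
	else
		puts("established hash: per-bucket lock");
}

int main(void)
{
	/* With the constant 0 below, gcc folds the condition and removes
	 * the first branch entirely, which is what the old
	 * __inet_hash(..., 0) call sites relied on. */
	hash_sock(0, 0);
	/* The listen-capable call sites keep both branches. */
	hash_sock(1, 1);
	return 0;
}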
@@ -264,37 +264,14 @@ static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo)
 	wake_up(&hashinfo->lhash_wait);
 }
 
-static inline void __inet_hash(struct inet_hashinfo *hashinfo,
-			       struct sock *sk, const int listen_possible)
-{
-	struct hlist_head *list;
-	rwlock_t *lock;
-
-	BUG_TRAP(sk_unhashed(sk));
-	if (listen_possible && sk->sk_state == TCP_LISTEN) {
-		list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
-		lock = &hashinfo->lhash_lock;
-		inet_listen_wlock(hashinfo);
-	} else {
-		struct inet_ehash_bucket *head;
-		sk->sk_hash = inet_sk_ehashfn(sk);
-		head = inet_ehash_bucket(hashinfo, sk->sk_hash);
-		list = &head->chain;
-		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
-		write_lock(lock);
-	}
-	__sk_add_node(sk, list);
-	sock_prot_inc_use(sk->sk_prot);
-	write_unlock(lock);
-	if (listen_possible && sk->sk_state == TCP_LISTEN)
-		wake_up(&hashinfo->lhash_wait);
-}
+extern void __inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk);
+extern void __inet_hash_nolisten(struct inet_hashinfo *hinfo, struct sock *sk);
 
 static inline void inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk)
 {
 	if (sk->sk_state != TCP_CLOSE) {
 		local_bh_disable();
-		__inet_hash(hashinfo, sk, 1);
+		__inet_hash(hashinfo, sk);
 		local_bh_enable();
 	}
 }
......
@@ -408,7 +408,7 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
 
 	dccp_sync_mss(newsk, dst_mtu(dst));
 
-	__inet_hash(&dccp_hashinfo, newsk, 0);
+	__inet_hash_nolisten(&dccp_hashinfo, newsk);
 	__inet_inherit_port(&dccp_hashinfo, sk, newsk);
 
 	return newsk;
......
@@ -267,6 +267,48 @@ static inline u32 inet_sk_port_offset(const struct sock *sk)
 			inet->dport);
 }
 
+void __inet_hash_nolisten(struct inet_hashinfo *hashinfo, struct sock *sk)
+{
+	struct hlist_head *list;
+	rwlock_t *lock;
+	struct inet_ehash_bucket *head;
+
+	BUG_TRAP(sk_unhashed(sk));
+
+	sk->sk_hash = inet_sk_ehashfn(sk);
+	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
+	list = &head->chain;
+	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
+
+	write_lock(lock);
+	__sk_add_node(sk, list);
+	sock_prot_inc_use(sk->sk_prot);
+	write_unlock(lock);
+}
+EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
+
+void __inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk)
+{
+	struct hlist_head *list;
+	rwlock_t *lock;
+
+	if (sk->sk_state != TCP_LISTEN) {
+		__inet_hash_nolisten(hashinfo, sk);
+		return;
+	}
+
+	BUG_TRAP(sk_unhashed(sk));
+	list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
+	lock = &hashinfo->lhash_lock;
+
+	inet_listen_wlock(hashinfo);
+	__sk_add_node(sk, list);
+	sock_prot_inc_use(sk->sk_prot);
+	write_unlock(lock);
+	wake_up(&hashinfo->lhash_wait);
+}
+EXPORT_SYMBOL_GPL(__inet_hash);
+
 /*
  * Bind a port for a connect operation and hash it.
  */
@@ -334,7 +334,7 @@ int inet_hash_connect(struct inet_timewait_death_row *death_row,
 		inet_bind_hash(sk, tb, port);
 		if (sk_unhashed(sk)) {
 			inet_sk(sk)->sport = htons(port);
-			__inet_hash(hinfo, sk, 0);
+			__inet_hash_nolisten(hinfo, sk);
 		}
 		spin_unlock(&head->lock);
@@ -351,7 +393,7 @@ int inet_hash_connect(struct inet_timewait_death_row *death_row,
 	tb = inet_csk(sk)->icsk_bind_hash;
 	spin_lock_bh(&head->lock);
 	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
-		__inet_hash(hinfo, sk, 0);
+		__inet_hash_nolisten(hinfo, sk);
 		spin_unlock_bh(&head->lock);
 		return 0;
 	} else {
......
@@ -1478,7 +1478,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	}
 #endif
 
-	__inet_hash(&tcp_hashinfo, newsk, 0);
+	__inet_hash_nolisten(&tcp_hashinfo, newsk);
 	__inet_inherit_port(&tcp_hashinfo, sk, newsk);
 
 	return newsk;
......
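Taken together, the call-site hunks follow one rule: paths that know the socket cannot be a listener at hashing time (a freshly created child in a *_syn_recv_sock handler, or a socket hashed from inet_hash_connect()) switch to __inet_hash_nolisten(), while general-purpose paths call the new out-of-line __inet_hash(), which performs the TCP_LISTEN check itself. A hedged kernel-style sketch of that rule, using only the two signatures introduced above (the helper name and the 'cannot_listen' flag are illustrative, not part of the patch):

#include <linux/types.h>
#include <net/inet_hashtables.h>	/* __inet_hash(), __inet_hash_nolisten() */
#include <net/tcp_states.h>		/* TCP_LISTEN */

/* Illustrative only: mirrors how the converted call sites pick a variant. */
static void example_hash_sock(struct inet_hashinfo *hashinfo, struct sock *sk,
			      bool cannot_listen)
{
	if (cannot_listen)
		/* Caller proves the socket is not listening: skip the
		 * TCP_LISTEN test and the lhash_lock entirely. */
		__inet_hash_nolisten(hashinfo, sk);
	else
		/* Generic path: the out-of-line helper checks
		 * sk->sk_state == TCP_LISTEN internally. */
		__inet_hash(hashinfo, sk);
}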