提交 d9ff3049 编写于 作者: K Kirill Tkhai 提交者: David S. Miller

net: Replace ip_ra_lock with per-net mutex

Since ra_chain is per-net, we may use a per-net mutex
to protect it in ip_ra_control(). This improves
scalability.
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 5796ef75
...@@ -50,6 +50,7 @@ struct netns_ipv4 { ...@@ -50,6 +50,7 @@ struct netns_ipv4 {
struct ipv4_devconf *devconf_all; struct ipv4_devconf *devconf_all;
struct ipv4_devconf *devconf_dflt; struct ipv4_devconf *devconf_dflt;
struct ip_ra_chain __rcu *ra_chain; struct ip_ra_chain __rcu *ra_chain;
struct mutex ra_mutex;
#ifdef CONFIG_IP_MULTIPLE_TABLES #ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_rules_ops *rules_ops; struct fib_rules_ops *rules_ops;
bool fib_has_custom_rules; bool fib_has_custom_rules;
......
...@@ -301,6 +301,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) ...@@ -301,6 +301,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
net->user_ns = user_ns; net->user_ns = user_ns;
idr_init(&net->netns_ids); idr_init(&net->netns_ids);
spin_lock_init(&net->nsid_lock); spin_lock_init(&net->nsid_lock);
mutex_init(&net->ipv4.ra_mutex);
list_for_each_entry(ops, &pernet_list, list) { list_for_each_entry(ops, &pernet_list, list) {
error = ops_init(ops, net); error = ops_init(ops, net);
......
...@@ -322,9 +322,6 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, ...@@ -322,9 +322,6 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
return 0; return 0;
} }
static DEFINE_SPINLOCK(ip_ra_lock);
static void ip_ra_destroy_rcu(struct rcu_head *head) static void ip_ra_destroy_rcu(struct rcu_head *head)
{ {
struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu); struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
...@@ -345,21 +342,21 @@ int ip_ra_control(struct sock *sk, unsigned char on, ...@@ -345,21 +342,21 @@ int ip_ra_control(struct sock *sk, unsigned char on,
new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
spin_lock_bh(&ip_ra_lock); mutex_lock(&net->ipv4.ra_mutex);
for (rap = &net->ipv4.ra_chain; for (rap = &net->ipv4.ra_chain;
(ra = rcu_dereference_protected(*rap, (ra = rcu_dereference_protected(*rap,
lockdep_is_held(&ip_ra_lock))) != NULL; lockdep_is_held(&net->ipv4.ra_mutex))) != NULL;
rap = &ra->next) { rap = &ra->next) {
if (ra->sk == sk) { if (ra->sk == sk) {
if (on) { if (on) {
spin_unlock_bh(&ip_ra_lock); mutex_unlock(&net->ipv4.ra_mutex);
kfree(new_ra); kfree(new_ra);
return -EADDRINUSE; return -EADDRINUSE;
} }
/* dont let ip_call_ra_chain() use sk again */ /* dont let ip_call_ra_chain() use sk again */
ra->sk = NULL; ra->sk = NULL;
RCU_INIT_POINTER(*rap, ra->next); RCU_INIT_POINTER(*rap, ra->next);
spin_unlock_bh(&ip_ra_lock); mutex_unlock(&net->ipv4.ra_mutex);
if (ra->destructor) if (ra->destructor)
ra->destructor(sk); ra->destructor(sk);
...@@ -374,7 +371,7 @@ int ip_ra_control(struct sock *sk, unsigned char on, ...@@ -374,7 +371,7 @@ int ip_ra_control(struct sock *sk, unsigned char on,
} }
} }
if (!new_ra) { if (!new_ra) {
spin_unlock_bh(&ip_ra_lock); mutex_unlock(&net->ipv4.ra_mutex);
return -ENOBUFS; return -ENOBUFS;
} }
new_ra->sk = sk; new_ra->sk = sk;
...@@ -383,7 +380,7 @@ int ip_ra_control(struct sock *sk, unsigned char on, ...@@ -383,7 +380,7 @@ int ip_ra_control(struct sock *sk, unsigned char on,
RCU_INIT_POINTER(new_ra->next, ra); RCU_INIT_POINTER(new_ra->next, ra);
rcu_assign_pointer(*rap, new_ra); rcu_assign_pointer(*rap, new_ra);
sock_hold(sk); sock_hold(sk);
spin_unlock_bh(&ip_ra_lock); mutex_unlock(&net->ipv4.ra_mutex);
return 0; return 0;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册