提交 70da5b5c 编写于 作者: Martin KaFai Lau 提交者: David S. Miller

ipv6: Replace spinlock with seqlock and rcu in ip6_tunnel

This patch uses a seqlock to ensure consistency between idst->dst and
idst->cookie.  It also makes dst freeing from fib tree to undergo a
rcu grace period.
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 8e3d5be7
...@@ -33,8 +33,8 @@ struct __ip6_tnl_parm { ...@@ -33,8 +33,8 @@ struct __ip6_tnl_parm {
}; };
struct ip6_tnl_dst { struct ip6_tnl_dst {
spinlock_t lock; seqlock_t lock;
struct dst_entry *dst; struct dst_entry __rcu *dst;
u32 cookie; u32 cookie;
}; };
......
...@@ -155,6 +155,11 @@ static void node_free(struct fib6_node *fn) ...@@ -155,6 +155,11 @@ static void node_free(struct fib6_node *fn)
kmem_cache_free(fib6_node_kmem, fn); kmem_cache_free(fib6_node_kmem, fn);
} }
/* Defer freeing of @rt's dst until after an RCU grace period.
 *
 * Per this commit's intent (see the message above: "makes dst freeing
 * from fib tree to undergo a rcu grace period"), readers such as
 * ip6_tnl_dst_get() may still be dereferencing the cached dst under
 * rcu_read_lock(), so the free is scheduled via call_rcu() with
 * dst_rcu_free as the callback instead of freeing synchronously.
 */
static void rt6_rcu_free(struct rt6_info *rt)
{
call_rcu(&rt->dst.rcu_head, dst_rcu_free);
}
static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt) static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
{ {
int cpu; int cpu;
...@@ -169,7 +174,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt) ...@@ -169,7 +174,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu); ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu);
pcpu_rt = *ppcpu_rt; pcpu_rt = *ppcpu_rt;
if (pcpu_rt) { if (pcpu_rt) {
dst_free(&pcpu_rt->dst); rt6_rcu_free(pcpu_rt);
*ppcpu_rt = NULL; *ppcpu_rt = NULL;
} }
} }
...@@ -181,7 +186,7 @@ static void rt6_release(struct rt6_info *rt) ...@@ -181,7 +186,7 @@ static void rt6_release(struct rt6_info *rt)
{ {
if (atomic_dec_and_test(&rt->rt6i_ref)) { if (atomic_dec_and_test(&rt->rt6i_ref)) {
rt6_free_pcpu(rt); rt6_free_pcpu(rt);
dst_free(&rt->dst); rt6_rcu_free(rt);
} }
} }
......
...@@ -126,45 +126,48 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev) ...@@ -126,45 +126,48 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
* Locking : hash tables are protected by RCU and RTNL * Locking : hash tables are protected by RCU and RTNL
*/ */
static void __ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst, static void ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst,
struct dst_entry *dst) struct dst_entry *dst)
{ {
dst_release(idst->dst); write_seqlock_bh(&idst->lock);
dst_release(rcu_dereference_protected(
idst->dst,
lockdep_is_held(&idst->lock.lock)));
if (dst) { if (dst) {
dst_hold(dst); dst_hold(dst);
idst->cookie = rt6_get_cookie((struct rt6_info *)dst); idst->cookie = rt6_get_cookie((struct rt6_info *)dst);
} else { } else {
idst->cookie = 0; idst->cookie = 0;
} }
idst->dst = dst; rcu_assign_pointer(idst->dst, dst);
} write_sequnlock_bh(&idst->lock);
static void ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst,
struct dst_entry *dst)
{
spin_lock_bh(&idst->lock);
__ip6_tnl_per_cpu_dst_set(idst, dst);
spin_unlock_bh(&idst->lock);
} }
struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t) struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t)
{ {
struct ip6_tnl_dst *idst; struct ip6_tnl_dst *idst;
struct dst_entry *dst; struct dst_entry *dst;
unsigned int seq;
u32 cookie;
idst = raw_cpu_ptr(t->dst_cache); idst = raw_cpu_ptr(t->dst_cache);
spin_lock_bh(&idst->lock);
dst = idst->dst; rcu_read_lock();
if (dst) { do {
if (!dst->obsolete || dst->ops->check(dst, idst->cookie)) { seq = read_seqbegin(&idst->lock);
dst_hold(idst->dst); dst = rcu_dereference(idst->dst);
} else { cookie = idst->cookie;
__ip6_tnl_per_cpu_dst_set(idst, NULL); } while (read_seqretry(&idst->lock, seq));
dst = NULL;
} if (dst && !atomic_inc_not_zero(&dst->__refcnt))
dst = NULL;
rcu_read_unlock();
if (dst && dst->obsolete && !dst->ops->check(dst, cookie)) {
ip6_tnl_per_cpu_dst_set(idst, NULL);
dst_release(dst);
dst = NULL;
} }
spin_unlock_bh(&idst->lock);
return dst; return dst;
} }
EXPORT_SYMBOL_GPL(ip6_tnl_dst_get); EXPORT_SYMBOL_GPL(ip6_tnl_dst_get);
...@@ -204,7 +207,7 @@ int ip6_tnl_dst_init(struct ip6_tnl *t) ...@@ -204,7 +207,7 @@ int ip6_tnl_dst_init(struct ip6_tnl *t)
return -ENOMEM; return -ENOMEM;
for_each_possible_cpu(i) for_each_possible_cpu(i)
spin_lock_init(&per_cpu_ptr(t->dst_cache, i)->lock); seqlock_init(&per_cpu_ptr(t->dst_cache, i)->lock);
return 0; return 0;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册