Commit 7d2b5548 authored by Marek Lindner

batman-adv: Correct rcu refcounting for softif_neigh

Two threads might access the same data within the same RCU grace period: the
first thread calls call_rcu() to decrement the refcount and free the data,
while the second thread increases the refcount to use the data. To avoid this
race condition, all refcount operations have to be atomic.
Reported-by: Sven Eckelmann <sven@narfation.org>
Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
Parent 25b6d3c1
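The sketch below is a minimal userspace illustration of the refcounting scheme the patch switches to, written with C11 atomics instead of the kernel's atomic_t/RCU primitives; the names (struct neigh, neigh_get_ref, neigh_put_ref) are illustrative and not part of the patch, and the final free() stands in for the deferred call_rcu() used in the kernel. It shows why a lockless reader must take its reference with an increment-if-not-zero operation: once the count has reached zero the object may already be on its way to being freed, so the reader has to back off rather than resurrect it.

/* Hypothetical userspace sketch, not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct neigh {
	atomic_int refcount;
	/* ... payload ... */
};

/* Userspace analogue of atomic_inc_not_zero(): take a reference only
 * while the object is still alive (refcount > 0). A reader that loses
 * the race against the final put must not bump 0 back to 1. */
static bool neigh_get_ref(struct neigh *n)
{
	int old = atomic_load(&n->refcount);

	do {
		if (old == 0)
			return false;	/* already headed for free() */
	} while (!atomic_compare_exchange_weak(&n->refcount, &old, old + 1));

	return true;
}

/* Analogue of atomic_dec_and_test() + call_rcu(): the last put frees
 * the object (here immediately; the kernel defers the kfree() past an
 * RCU grace period so concurrent lockless readers can finish). */
static void neigh_put_ref(struct neigh *n)
{
	if (atomic_fetch_sub(&n->refcount, 1) == 1)
		free(n);
}

int main(void)
{
	struct neigh *n = calloc(1, sizeof(*n));

	if (!n)
		return 1;

	/* creator starts at 2: one reference for the list, one for the caller */
	atomic_store(&n->refcount, 2);

	if (neigh_get_ref(n))	/* a reader takes its own reference ...  */
		neigh_put_ref(n);	/* ... and drops it when done */

	neigh_put_ref(n);	/* caller's reference */
	neigh_put_ref(n);	/* list reference: count hits 0, object freed */
	return 0;
}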
@@ -76,20 +76,18 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len)
 	return 0;
 }
 
-static void softif_neigh_free_ref(struct kref *refcount)
+static void softif_neigh_free_rcu(struct rcu_head *rcu)
 {
 	struct softif_neigh *softif_neigh;
 
-	softif_neigh = container_of(refcount, struct softif_neigh, refcount);
+	softif_neigh = container_of(rcu, struct softif_neigh, rcu);
 	kfree(softif_neigh);
 }
 
-static void softif_neigh_free_rcu(struct rcu_head *rcu)
+static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
 {
-	struct softif_neigh *softif_neigh;
-
-	softif_neigh = container_of(rcu, struct softif_neigh, rcu);
-	kref_put(&softif_neigh->refcount, softif_neigh_free_ref);
+	if (atomic_dec_and_test(&softif_neigh->refcount))
+		call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu);
 }
 
 void softif_neigh_purge(struct bat_priv *bat_priv)
@@ -116,11 +114,10 @@ void softif_neigh_purge(struct bat_priv *bat_priv)
 				 softif_neigh->addr, softif_neigh->vid);
 			softif_neigh_tmp = bat_priv->softif_neigh;
 			bat_priv->softif_neigh = NULL;
-			kref_put(&softif_neigh_tmp->refcount,
-				 softif_neigh_free_ref);
+			softif_neigh_free_ref(softif_neigh_tmp);
 		}
 
-		call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu);
+		softif_neigh_free_ref(softif_neigh);
 	}
 	spin_unlock_bh(&bat_priv->softif_neigh_lock);
@@ -141,8 +138,11 @@ static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
 		if (softif_neigh->vid != vid)
 			continue;
 
+		if (!atomic_inc_not_zero(&softif_neigh->refcount))
+			continue;
+
 		softif_neigh->last_seen = jiffies;
-		goto found;
+		goto out;
 	}
 
 	softif_neigh = kzalloc(sizeof(struct softif_neigh), GFP_ATOMIC);
@@ -152,15 +152,14 @@ static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
 	memcpy(softif_neigh->addr, addr, ETH_ALEN);
 	softif_neigh->vid = vid;
 	softif_neigh->last_seen = jiffies;
-	kref_init(&softif_neigh->refcount);
+	/* initialize with 2 - caller decrements counter by one */
+	atomic_set(&softif_neigh->refcount, 2);
 	INIT_HLIST_NODE(&softif_neigh->list);
 
 	spin_lock_bh(&bat_priv->softif_neigh_lock);
 	hlist_add_head_rcu(&softif_neigh->list, &bat_priv->softif_neigh_list);
 	spin_unlock_bh(&bat_priv->softif_neigh_lock);
 
-found:
-	kref_get(&softif_neigh->refcount);
 out:
 	rcu_read_unlock();
 	return softif_neigh;
@@ -264,7 +263,7 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
 			 softif_neigh->addr, softif_neigh->vid);
 		softif_neigh_tmp = bat_priv->softif_neigh;
 		bat_priv->softif_neigh = softif_neigh;
-		kref_put(&softif_neigh_tmp->refcount, softif_neigh_free_ref);
+		softif_neigh_free_ref(softif_neigh_tmp);
 		/* we need to hold the additional reference */
 		goto err;
 	}
@@ -282,7 +281,7 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
 	}
 
 out:
-	kref_put(&softif_neigh->refcount, softif_neigh_free_ref);
+	softif_neigh_free_ref(softif_neigh);
 err:
 	kfree_skb(skb);
 	return;
...
@@ -268,7 +268,7 @@ struct softif_neigh {
 	uint8_t addr[ETH_ALEN];
 	unsigned long last_seen;
 	short vid;
-	struct kref refcount;
+	atomic_t refcount;
 	struct rcu_head rcu;
 };
...