Commit 310ebbba authored by Yotam Gigi, committed by David S. Miller

ipmr: Add reference count to MFC entries

Next commits will introduce MFC notifications through the atomic
fib_notification chain, thus allowing modules to be aware of MFC entries.

Because modules may need to hold a reference to an MFC entry, add a
reference count to MFC entries so that they cannot be freed while a
module is still using them.

Currently, reference counting is done only on resolved MFC entries.
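
As a rough illustration (not part of this patch; the caller below is
hypothetical and assumes only the helpers added to include/linux/mroute.h
in this change), a module that stashes a pointer to a resolved entry
would pin and release it like this:

	/* Hypothetical consumer of a resolved MFC entry. */
	static void example_use_mfc(struct mfc_cache *c)
	{
		ipmr_cache_hold(c);	/* refcount_inc(&c->mfc_un.res.refcount) */

		/* ... use c, possibly after ipmr has dropped its own reference ... */

		ipmr_cache_put(c);	/* frees via call_rcu() once the count drops to zero */
	}

The entry is allocated with the count set to 1 (ipmr_cache_alloc), and the
MFC table drops its own reference with ipmr_cache_put() instead of freeing
directly, so whichever user is last triggers the RCU free.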
Signed-off-by: Yotam Gigi <yotamg@mellanox.com>
Reviewed-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Reviewed-by: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 85e48228
include/linux/mroute.h
@@ -109,6 +109,7 @@ struct mfc_cache_cmp_arg {
  * @wrong_if: number of wrong source interface hits
  * @lastuse: time of last use of the group (traffic or update)
  * @ttls: OIF TTL threshold array
+ * @refcount: reference count for this entry
  * @list: global entry list
  * @rcu: used for entry destruction
  */
@@ -138,6 +139,7 @@ struct mfc_cache {
 			unsigned long wrong_if;
 			unsigned long lastuse;
 			unsigned char ttls[MAXVIFS];
+			refcount_t refcount;
 		} res;
 	} mfc_un;
 	struct list_head list;
@@ -148,4 +150,23 @@ struct rtmsg;
 int ipmr_get_route(struct net *net, struct sk_buff *skb,
 		   __be32 saddr, __be32 daddr,
 		   struct rtmsg *rtm, u32 portid);
+
+#ifdef CONFIG_IP_MROUTE
+void ipmr_cache_free(struct mfc_cache *mfc_cache);
+#else
+static inline void ipmr_cache_free(struct mfc_cache *mfc_cache)
+{
+}
+#endif
+
+static inline void ipmr_cache_put(struct mfc_cache *c)
+{
+	if (refcount_dec_and_test(&c->mfc_un.res.refcount))
+		ipmr_cache_free(c);
+}
+static inline void ipmr_cache_hold(struct mfc_cache *c)
+{
+	refcount_inc(&c->mfc_un.res.refcount);
+}
+
 #endif
net/ipv4/ipmr.c
@@ -652,10 +652,11 @@ static void ipmr_cache_free_rcu(struct rcu_head *head)
 	kmem_cache_free(mrt_cachep, c);
 }
 
-static inline void ipmr_cache_free(struct mfc_cache *c)
+void ipmr_cache_free(struct mfc_cache *c)
 {
 	call_rcu(&c->rcu, ipmr_cache_free_rcu);
 }
+EXPORT_SYMBOL(ipmr_cache_free);
 
 /* Destroy an unresolved cache entry, killing queued skbs
  * and reporting error to netlink readers.
@@ -949,6 +950,7 @@ static struct mfc_cache *ipmr_cache_alloc(void)
 	if (c) {
 		c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
 		c->mfc_un.res.minvif = MAXVIFS;
+		refcount_set(&c->mfc_un.res.refcount, 1);
 	}
 	return c;
 }
@@ -1162,7 +1164,7 @@ static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
 	rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
 	list_del_rcu(&c->list);
 	mroute_netlink_event(mrt, c, RTM_DELROUTE);
-	ipmr_cache_free(c);
+	ipmr_cache_put(c);
 
 	return 0;
 }
@@ -1264,7 +1266,7 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
 		rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
 		list_del_rcu(&c->list);
 		mroute_netlink_event(mrt, c, RTM_DELROUTE);
-		ipmr_cache_free(c);
+		ipmr_cache_put(c);
 	}
 
 	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {