Commit 4db0acf3 authored by Jarek Poplawski, committed by David S. Miller

net: gen_estimator: Fix gen_kill_estimator() lookups

gen_kill_estimator()'s linear list lookups are very slow, and soft lockups
were reported, e.g. while deleting a large number of HTB classes. Here is
another try to fix this problem: this time internally, with an rbtree, so
similar to Jamal's hashing idea IIRC. (Looking up the next hit could still
be optimized, but it is already fast as it is.)
Reported-by: Badalian Vyacheslav <slavon@bigtelecom.ru>
Reported-by: Denys Fedoryshchenko <denys@visp.net.lb>
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Acked-by: Jamal Hadi Salim <hadi@cyberus.ca>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 3f0947c3
@@ -31,6 +31,7 @@
 #include <linux/skbuff.h>
 #include <linux/rtnetlink.h>
 #include <linux/init.h>
+#include <linux/rbtree.h>
 #include <net/sock.h>
 #include <net/gen_stats.h>
@@ -89,6 +90,7 @@ struct gen_estimator
 	u32			avpps;
 	u32			avbps;
 	struct rcu_head		e_rcu;
+	struct rb_node		node;
 };
 
 struct gen_estimator_head
@@ -102,6 +104,9 @@ static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
 /* Protects against NULL dereference */
 static DEFINE_RWLOCK(est_lock);
 
+/* Protects against soft lockup during large deletion */
+static struct rb_root est_root = RB_ROOT;
+
 static void est_timer(unsigned long arg)
 {
 	int idx = (int)arg;
@@ -139,6 +144,45 @@ static void est_timer(unsigned long arg)
 	rcu_read_unlock();
 }
 
+static void gen_add_node(struct gen_estimator *est)
+{
+	struct rb_node **p = &est_root.rb_node, *parent = NULL;
+
+	while (*p) {
+		struct gen_estimator *e;
+
+		parent = *p;
+		e = rb_entry(parent, struct gen_estimator, node);
+
+		if (est->bstats > e->bstats)
+			p = &parent->rb_right;
+		else
+			p = &parent->rb_left;
+	}
+	rb_link_node(&est->node, parent, p);
+	rb_insert_color(&est->node, &est_root);
+}
+
+static struct gen_estimator *gen_find_node(struct gnet_stats_basic *bstats,
+					   struct gnet_stats_rate_est *rate_est)
+{
+	struct rb_node *p = est_root.rb_node;
+
+	while (p) {
+		struct gen_estimator *e;
+
+		e = rb_entry(p, struct gen_estimator, node);
+
+		if (bstats > e->bstats)
+			p = p->rb_right;
+		else if (bstats < e->bstats || rate_est != e->rate_est)
+			p = p->rb_left;
+		else
+			return e;
+	}
+	return NULL;
+}
+
 /**
  * gen_new_estimator - create a new rate estimator
  * @bstats: basic statistics
@@ -194,6 +238,8 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
 	mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
 
 	list_add_rcu(&est->list, &elist[idx].list);
+	gen_add_node(est);
+
 	return 0;
 }
@@ -209,34 +255,24 @@ static void __gen_kill_estimator(struct rcu_head *head)
  * @bstats: basic statistics
  * @rate_est: rate estimator statistics
  *
- * Removes the rate estimator specified by &bstats and &rate_est
- * and deletes the timer.
+ * Removes the rate estimator specified by &bstats and &rate_est.
  *
  * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic *bstats,
-	struct gnet_stats_rate_est *rate_est)
+			struct gnet_stats_rate_est *rate_est)
 {
-	int idx;
-	struct gen_estimator *e, *n;
-
-	for (idx=0; idx <= EST_MAX_INTERVAL; idx++) {
-
-		/* Skip non initialized indexes */
-		if (!elist[idx].timer.function)
-			continue;
-
-		list_for_each_entry_safe(e, n, &elist[idx].list, list) {
-			if (e->rate_est != rate_est || e->bstats != bstats)
-				continue;
+	struct gen_estimator *e;
 
-			write_lock_bh(&est_lock);
-			e->bstats = NULL;
-			write_unlock_bh(&est_lock);
+	while ((e = gen_find_node(bstats, rate_est))) {
+		rb_erase(&e->node, &est_root);
 
-			list_del_rcu(&e->list);
-			call_rcu(&e->e_rcu, __gen_kill_estimator);
-		}
+		write_lock_bh(&est_lock);
+		e->bstats = NULL;
+		write_unlock_bh(&est_lock);
+
+		list_del_rcu(&e->list);
+		call_rcu(&e->e_rcu, __gen_kill_estimator);
 	}
 }
 
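
The core of the change is that every estimator is also indexed in a tree keyed by the numeric value of its bstats pointer, so gen_kill_estimator() can locate matching entries without walking every interval list. The sketch below illustrates that pointer-keyed insert/lookup ordering in plain userspace C. It is a minimal illustration with hypothetical names (est_node, est_insert, est_find) and an unbalanced binary search tree rather than the kernel's rbtree API; the point is only to show how entries that share the same bstats key but have a different rate_est are pushed down the left subtree, exactly as gen_add_node()/gen_find_node() above do.

```c
#include <stdio.h>

/* Hypothetical stand-ins for the kernel statistics structures. */
struct bstats   { unsigned long bytes, packets; };
struct rate_est { unsigned long bps, pps; };

struct est_node {
	struct bstats   *bstats;    /* key: compared by pointer value */
	struct rate_est *rate_est;  /* disambiguates entries sharing bstats */
	struct est_node *left, *right;
};

/* Insert keyed by the bstats pointer, mirroring gen_add_node():
 * greater keys go right, equal or smaller keys go left. */
static struct est_node *est_insert(struct est_node *root, struct est_node *n)
{
	if (!root)
		return n;
	if (n->bstats > root->bstats)
		root->right = est_insert(root->right, n);
	else
		root->left = est_insert(root->left, n);
	return root;
}

/* Lookup mirroring gen_find_node(): an equal bstats key with a
 * different rate_est keeps searching down the left subtree. */
static struct est_node *est_find(struct est_node *root,
				 struct bstats *b, struct rate_est *r)
{
	while (root) {
		if (b > root->bstats)
			root = root->right;
		else if (b < root->bstats || r != root->rate_est)
			root = root->left;
		else
			return root;
	}
	return NULL;
}

int main(void)
{
	struct bstats b1 = {0}, b2 = {0};
	struct rate_est r1 = {0}, r2 = {0};
	struct est_node n1 = { &b1, &r1, NULL, NULL };
	struct est_node n2 = { &b2, &r2, NULL, NULL };
	struct est_node *root = NULL;

	root = est_insert(root, &n1);
	root = est_insert(root, &n2);

	printf("found (b2, r2): %s\n", est_find(root, &b2, &r2) == &n2 ? "yes" : "no");
	printf("miss  (b1, r2): %s\n", est_find(root, &b1, &r2) == NULL ? "yes" : "no");
	return 0;
}
```

Keying on the raw pointer value works here because the estimator code only ever needs identity lookups on (bstats, rate_est) pairs; the relative ordering of the pointers is irrelevant as long as insertion and lookup use the same comparison.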