Commit 3a48ee8a authored by Andreas Gruenbacher, committed by Al Viro

mbcache: Limit the maximum number of cache entries

Limit the maximum number of mb_cache entries depending on the number of
hash buckets: if the only limit to the number of cache entries is the
available memory the hash chains can grow very long, taking a long time
to search.

At least partially solves https://bugzilla.lustre.org/show_bug.cgi?id=22771.
Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Parent 3b6036d1
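The cap this patch installs works out to 16 entries per hash bucket on average once the cache is full. A minimal sketch of that sizing rule, assuming the usual bucket_count = 1 << bucket_bits relationship inside mb_cache_create() (the bucket_bits value below is purely illustrative):

/* Illustrative arithmetic only; bucket_bits = 10 is an example value. */
int bucket_bits  = 10;                    /* hash table size chosen by the caller */
int bucket_count = 1 << bucket_bits;      /* 1024 hash buckets */
int max_entries  = bucket_count << 4;     /* cap of 16384 entries, ~16 per chain */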
@@ -80,6 +80,7 @@ struct mb_cache {
         struct list_head        c_cache_list;
         const char              *c_name;
         atomic_t                c_entry_count;
+        int                     c_max_entries;
         int                     c_bucket_bits;
         struct kmem_cache       *c_entry_cache;
         struct list_head        *c_block_hash;
@@ -243,6 +244,12 @@ mb_cache_create(const char *name, int bucket_bits)
         if (!cache->c_entry_cache)
                 goto fail2;
 
+        /*
+         * Set an upper limit on the number of cache entries so that the hash
+         * chains won't grow too long.
+         */
+        cache->c_max_entries = bucket_count << 4;
+
         spin_lock(&mb_cache_spinlock);
         list_add(&cache->c_cache_list, &mb_cache_list);
         spin_unlock(&mb_cache_spinlock);
@@ -333,7 +340,6 @@ mb_cache_destroy(struct mb_cache *cache)
         kfree(cache);
 }
 
-
 /*
  * mb_cache_entry_alloc()
  *
@@ -345,17 +351,29 @@ mb_cache_destroy(struct mb_cache *cache)
 struct mb_cache_entry *
 mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
 {
-        struct mb_cache_entry *ce;
+        struct mb_cache_entry *ce = NULL;
 
-        ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
-        if (ce) {
+        if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
+                spin_lock(&mb_cache_spinlock);
+                if (!list_empty(&mb_cache_lru_list)) {
+                        ce = list_entry(mb_cache_lru_list.next,
+                                        struct mb_cache_entry, e_lru_list);
+                        list_del_init(&ce->e_lru_list);
+                        __mb_cache_entry_unhash(ce);
+                }
+                spin_unlock(&mb_cache_spinlock);
+        }
+        if (!ce) {
+                ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
+                if (!ce)
+                        return NULL;
                 atomic_inc(&cache->c_entry_count);
                 INIT_LIST_HEAD(&ce->e_lru_list);
                 INIT_LIST_HEAD(&ce->e_block_list);
                 ce->e_cache = cache;
-                ce->e_used = 1 + MB_CACHE_WRITER;
                 ce->e_queued = 0;
         }
+        ce->e_used = 1 + MB_CACHE_WRITER;
         return ce;
 }
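For readers who want the new allocation policy in isolation: once the entry count reaches the cap, mb_cache_entry_alloc() recycles the least recently used entry instead of growing the cache. Below is a minimal user-space sketch of that cap-and-reuse shape, with invented names (struct cache, cache_entry_alloc, ...) rather than the kernel's mbcache API; the real code additionally unhashes the recycled entry under mb_cache_spinlock, as the hunk above shows.

/*
 * Hypothetical user-space sketch of the cap-and-reuse allocation policy
 * introduced by this patch.  None of the names below are the kernel's
 * mbcache API; they only mirror its shape.
 */
#include <stdio.h>
#include <stdlib.h>

struct entry {
        struct entry *lru_next;         /* singly linked reuse list, oldest first */
        int data;
};

struct cache {
        struct entry *lru_oldest;       /* oldest unused entry, first reuse candidate */
        int entry_count;                /* entries allocated so far */
        int max_entries;                /* cap, bucket_count << 4 in the patch */
};

/* Return a new entry, or recycle the oldest unused one once the cap is hit. */
static struct entry *cache_entry_alloc(struct cache *c)
{
        struct entry *e = NULL;

        if (c->entry_count >= c->max_entries && c->lru_oldest) {
                e = c->lru_oldest;              /* take the oldest entry ... */
                c->lru_oldest = e->lru_next;    /* ... off the reuse list */
                e->lru_next = NULL;
        }
        if (!e) {
                e = malloc(sizeof(*e));         /* below the cap: really allocate */
                if (!e)
                        return NULL;
                c->entry_count++;
        }
        return e;
}

int main(void)
{
        struct cache c = { .lru_oldest = NULL, .entry_count = 0, .max_entries = 16 };
        struct entry *e = cache_entry_alloc(&c);

        printf("got entry %p, count=%d cap=%d\n", (void *)e, c.entry_count, c.max_entries);
        free(e);
        return 0;
}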