提交 fe92a055 编写于 作者: Peng Tao 提交者: Al Viro

staging/lustre/obdclass: convert lu_object shrinker to count/scan API

convert lu_object shrinker to new count/scan API.
Signed-off-by: Peng Tao <tao.peng@emc.com>
Signed-off-by: Andreas Dilger <andreas.dilger@intel.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
上级 cbc3769e
......@@ -1779,7 +1779,6 @@ int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
}
EXPORT_SYMBOL(lu_env_refill_by_tags);
static struct shrinker *lu_site_shrinker = NULL;
typedef struct lu_site_stats{
unsigned lss_populated;
......@@ -1835,19 +1834,39 @@ static void lu_site_stats_get(cfs_hash_t *hs,
* objects without taking the lu_sites_guard lock, but this is not
* possible in the current implementation.
*/
static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
static unsigned long lu_cache_shrink_count(struct shrinker *sk,
struct shrink_control *sc)
{
lu_site_stats_t stats;
struct lu_site *s;
struct lu_site *tmp;
int cached = 0;
int remain = shrink_param(sc, nr_to_scan);
unsigned long cached = 0;
if (!(sc->gfp_mask & __GFP_FS))
return 0;
mutex_lock(&lu_sites_guard);
list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
memset(&stats, 0, sizeof(stats));
lu_site_stats_get(s->ls_obj_hash, &stats, 0);
cached += stats.lss_total - stats.lss_busy;
}
mutex_unlock(&lu_sites_guard);
cached = (cached / 100) * sysctl_vfs_cache_pressure;
CDEBUG(D_INODE, "%ld objects cached\n", cached);
return cached;
}
/**
 * Reclaim up to sc->nr_to_scan lu_objects (count/scan shrinker API).
 *
 * Walks the global lu_sites list under lu_sites_guard, purging objects
 * from each site in turn until the scan target is met.  Each shrunk
 * site is moved to the tail of the list so successive scans spread the
 * reclaim pressure fairly across sites.
 *
 * \param sk  the registered shrinker (unused here)
 * \param sc  shrink request; sc->nr_to_scan is the reclaim target
 * \retval    number of objects actually freed, or SHRINK_STOP when
 *            __GFP_FS is not set
 */
static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
					  struct shrink_control *sc)
{
	struct lu_site *s;
	struct lu_site *tmp;
	unsigned long remain = sc->nr_to_scan, freed = 0;
	LIST_HEAD(splice);

	if (!(sc->gfp_mask & __GFP_FS))
		/* We must not take the lu_sites_guard lock when
		 * __GFP_FS is *not* set because of the deadlock
		 * possibility detailed above.  Additionally, the
		 * kernel will reclaim objects when __GFP_FS *is*
		 * set anyways, so skipping here loses nothing.
		 * NOTE(review): the middle of the original comment
		 * was lost in extraction -- verify against upstream.
		 */
		return SHRINK_STOP;

	mutex_lock(&lu_sites_guard);
	list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
		freed = lu_site_purge(&lu_shrink_env, s, remain);
		remain -= freed;
		/*
		 * Move just shrunk site to the tail of site list to
		 * assure shrinking fairness.
		 */
		list_move_tail(&s->ls_linkage, &splice);
		if (remain <= 0)
			break;
	}
	/* re-attach the shrunk sites at the end of the global list */
	list_splice(&splice, lu_sites.prev);
	mutex_unlock(&lu_sites_guard);

	return sc->nr_to_scan - remain;
}
/*
......@@ -1913,6 +1919,12 @@ int lu_printk_printer(const struct lu_env *env,
return 0;
}
/*
 * Shrinker descriptor for the lu_object cache, registered in
 * lu_global_init() and unregistered in lu_global_fini().
 */
static struct shrinker lu_site_shrinker = {
	.seeks		= DEFAULT_SEEKS,
	.count_objects	= lu_cache_shrink_count,
	.scan_objects	= lu_cache_shrink_scan,
};
/**
* Initialization of global lu_* data.
*/
......@@ -1947,9 +1959,7 @@ int lu_global_init(void)
* inode, one for ea. Unfortunately setting this high value results in
* lu_object/inode cache consuming all the memory.
*/
lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink);
if (lu_site_shrinker == NULL)
return -ENOMEM;
register_shrinker(&lu_site_shrinker);
return result;
}
......@@ -1959,11 +1969,7 @@ int lu_global_init(void)
*/
void lu_global_fini(void)
{
if (lu_site_shrinker != NULL) {
remove_shrinker(lu_site_shrinker);
lu_site_shrinker = NULL;
}
unregister_shrinker(&lu_site_shrinker);
lu_context_key_degister(&lu_global_key);
/*
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册