Commit 4aa409ca authored by Minchan Kim, committed by Linus Torvalds

zsmalloc: separate free_zspage from putback_zspage

Currently, putback_zspage frees the zspage under class->lock when its
fullness becomes ZS_EMPTY, but that gets in the way of the locking
scheme needed for the new zspage migration.  So this patch separates
free_zspage from putback_zspage and frees the zspage outside
class->lock, as preparation for zspage migration.
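
To make the split concrete, here is a minimal sketch of the resulting
caller pattern, condensed from the __zs_compact hunk below (not a
verbatim excerpt): putback_zspage only re-links the zspage and reports
its fullness, and the caller does the accounting and the free.  In this
patch the free still happens under class->lock; the point of the
separation is that a later migration patch can move it out.

	spin_lock(&class->lock);
	fullness = putback_zspage(class, zspage);	/* re-link only, never free */
	if (fullness == ZS_EMPTY) {
		/* accounting that used to live inside putback_zspage */
		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
				class->size, class->pages_per_zspage));
		atomic_long_sub(class->pages_per_zspage,
				&pool->pages_allocated);
		free_zspage(pool, zspage);	/* now the caller's decision */
	}
	spin_unlock(&class->lock);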

Link: http://lkml.kernel.org/r/1464736881-24886-10-git-send-email-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 3783689a
@@ -1687,14 +1687,12 @@ static struct zspage *isolate_zspage(struct size_class *class, bool source)
 
 /*
  * putback_zspage - add @zspage into right class's fullness list
- * @pool: target pool
  * @class: destination class
  * @zspage: target page
  *
  * Return @zspage's fullness_group
  */
-static enum fullness_group putback_zspage(struct zs_pool *pool,
-				struct size_class *class,
+static enum fullness_group putback_zspage(struct size_class *class,
 				struct zspage *zspage)
 {
 	enum fullness_group fullness;
@@ -1703,15 +1701,6 @@ static enum fullness_group putback_zspage(struct zs_pool *pool,
 	insert_zspage(class, zspage, fullness);
 	set_zspage_mapping(zspage, class->index, fullness);
 
-	if (fullness == ZS_EMPTY) {
-		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
-			class->size, class->pages_per_zspage));
-		atomic_long_sub(class->pages_per_zspage,
-				&pool->pages_allocated);
-
-		free_zspage(pool, zspage);
-	}
-
 	return fullness;
 }
 
@@ -1760,23 +1749,29 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
 			if (!migrate_zspage(pool, class, &cc))
 				break;
 
-			putback_zspage(pool, class, dst_zspage);
+			putback_zspage(class, dst_zspage);
 		}
 
 		/* Stop if we couldn't find slot */
 		if (dst_zspage == NULL)
 			break;
 
-		putback_zspage(pool, class, dst_zspage);
-		if (putback_zspage(pool, class, src_zspage) == ZS_EMPTY)
+		putback_zspage(class, dst_zspage);
+		if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
+			zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
+					class->size, class->pages_per_zspage));
+			atomic_long_sub(class->pages_per_zspage,
+					&pool->pages_allocated);
+			free_zspage(pool, src_zspage);
 			pool->stats.pages_compacted += class->pages_per_zspage;
+		}
 		spin_unlock(&class->lock);
 		cond_resched();
 		spin_lock(&class->lock);
 	}
 
 	if (src_zspage)
-		putback_zspage(pool, class, src_zspage);
+		putback_zspage(class, src_zspage);
 
 	spin_unlock(&class->lock);
 }