From cf675acb743f045b7482384be60987e69260e43d Mon Sep 17 00:00:00 2001
From: Ganesh Mahendran
Date: Thu, 28 Jul 2016 15:47:46 -0700
Subject: [PATCH] mm/zsmalloc: take obj index back from find_alloced_obj

The obj index value should be updated after returning from
find_alloced_obj() to avoid wasting CPU cycles on unnecessary object
scanning.

Link: http://lkml.kernel.org/r/1467882338-4300-2-git-send-email-opensource.ganesh@gmail.com
Signed-off-by: Ganesh Mahendran
Reviewed-by: Sergey Senozhatsky
Acked-by: Minchan Kim
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/zsmalloc.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 0b4790e81193..49143de9934c 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1741,10 +1741,11 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
  * return handle.
  */
 static unsigned long find_alloced_obj(struct size_class *class,
-					struct page *page, int index)
+					struct page *page, int *obj_idx)
 {
 	unsigned long head;
 	int offset = 0;
+	int index = *obj_idx;
 	unsigned long handle = 0;
 	void *addr = kmap_atomic(page);
 
@@ -1765,6 +1766,9 @@ static unsigned long find_alloced_obj(struct size_class *class,
 	}
 
 	kunmap_atomic(addr);
+
+	*obj_idx = index;
+
 	return handle;
 }
 
@@ -1790,7 +1794,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
 	int ret = 0;
 
 	while (1) {
-		handle = find_alloced_obj(class, s_page, obj_idx);
+		handle = find_alloced_obj(class, s_page, &obj_idx);
 		if (!handle) {
 			s_page = get_next_page(s_page);
 			if (!s_page)
--
GitLab
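
For illustration only, below is a minimal user-space C sketch of the pattern this
patch applies: the scan helper takes the object index by pointer and writes back
how far the scan got, so the caller resumes from that position instead of
rescanning objects it has already passed. The names used here (find_alloced,
allocated[], NR_OBJS) are hypothetical stand-ins and not part of zsmalloc.

/*
 * Standalone sketch (not kernel code) of passing the scan index by
 * pointer so the caller can continue where the helper stopped.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_OBJS 8

/* Hypothetical per-object "allocated" flags standing in for the
 * per-page object metadata that find_alloced_obj() walks. */
static bool allocated[NR_OBJS] = { false, false, true, false, true,
				   false, false, true };

/* Return the index of the next allocated object, or -1 if none is left.
 * *obj_idx is advanced past the objects already scanned, mirroring the
 * way find_alloced_obj() now reports its final index back to the caller. */
static int find_alloced(int *obj_idx)
{
	int index = *obj_idx;

	while (index < NR_OBJS && !allocated[index])
		index++;

	*obj_idx = index;	/* caller resumes here, no rescanning */

	return index < NR_OBJS ? index : -1;
}

int main(void)
{
	int obj_idx = 0;
	int found;

	/* migrate_zspage()-style loop: visit each allocated object once */
	while ((found = find_alloced(&obj_idx)) != -1) {
		printf("migrating object %d\n", found);
		obj_idx++;	/* step past the object just handled */
	}

	return 0;
}

With the index returned through the pointer, each call picks up exactly where
the previous scan ended, which is the "unnecessary object scanning" the commit
message refers to avoiding.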