From 2def7424c9be0069831380823fdb5cf72103b919 Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Thu, 5 Nov 2015 18:49:46 -0800
Subject: [PATCH] mm: page migration use the put_new_page whenever necessary

I don't know of any problem from the way it's used in our current tree,
but there is one defect in page migration's custom put_new_page feature.

An unused newpage is expected to be released with the put_new_page(), but
there was one MIGRATEPAGE_SUCCESS (0) path which released it with
putback_lru_page(): which can be very wrong for a custom pool.

Fixed more easily by resetting put_new_page once it won't be needed, than
by adding a further flag to modify the rc test.

Signed-off-by: Hugh Dickins
Cc: Christoph Lameter
Cc: "Kirill A. Shutemov"
Cc: Rik van Riel
Acked-by: Vlastimil Babka
Cc: Davidlohr Bueso
Cc: Oleg Nesterov
Cc: Sasha Levin
Cc: Dmitry Vyukov
Cc: KOSAKI Motohiro
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/migrate.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index d149cbb67a34..2f2e2236daf7 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -938,10 +938,11 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
 				   int force, enum migrate_mode mode,
 				   enum migrate_reason reason)
 {
-	int rc = 0;
+	int rc = MIGRATEPAGE_SUCCESS;
 	int *result = NULL;
-	struct page *newpage = get_new_page(page, private, &result);
+	struct page *newpage;
 
+	newpage = get_new_page(page, private, &result);
 	if (!newpage)
 		return -ENOMEM;
 
@@ -955,6 +956,8 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
 		goto out;
 
 	rc = __unmap_and_move(page, newpage, force, mode);
+	if (rc == MIGRATEPAGE_SUCCESS)
+		put_new_page = NULL;
 
 out:
 	if (rc != -EAGAIN) {
@@ -981,7 +984,7 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
 	 * it. Otherwise, putback_lru_page() will drop the reference grabbed
 	 * during isolation.
 	 */
-	if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
+	if (put_new_page) {
 		ClearPageSwapBacked(newpage);
 		put_new_page(newpage, private);
 	} else if (unlikely(__is_movable_balloon_page(newpage))) {
@@ -1022,7 +1025,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 				struct page *hpage, int force,
 				enum migrate_mode mode)
 {
-	int rc = 0;
+	int rc = -EAGAIN;
 	int *result = NULL;
 	int page_was_mapped = 0;
 	struct page *new_hpage;
@@ -1044,8 +1047,6 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	if (!new_hpage)
 		return -ENOMEM;
 
-	rc = -EAGAIN;
-
 	if (!trylock_page(hpage)) {
 		if (!force || mode != MIGRATE_SYNC)
 			goto out;
@@ -1070,8 +1071,10 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	if (anon_vma)
 		put_anon_vma(anon_vma);
 
-	if (rc == MIGRATEPAGE_SUCCESS)
+	if (rc == MIGRATEPAGE_SUCCESS) {
 		hugetlb_cgroup_migrate(hpage, new_hpage);
+		put_new_page = NULL;
+	}
 
 	unlock_page(hpage);
 out:
@@ -1083,7 +1086,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	 * it. Otherwise, put_page() will drop the reference grabbed during
 	 * isolation.
 	 */
-	if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
+	if (put_new_page)
 		put_new_page(new_hpage, private);
 	else
 		putback_active_hugepage(new_hpage);
-- 
GitLab
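
For context on what the fix protects, here is a minimal sketch (not part of
the patch) of a migrate_pages() caller that manages its own page pool, in the
style of compaction's compaction_alloc()/compaction_free() pair. The
new_page_t and free_page_t callback signatures match mm/migrate.c as of this
commit; struct my_pool, my_pool_take() and my_pool_return() are hypothetical
helpers invented for illustration.

#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/list.h>

/* Hypothetical pool type and helpers, for illustration only. */
struct my_pool {
	struct list_head freelist;
};

static struct page *my_pool_take(struct my_pool *pool)
{
	struct page *page;

	if (list_empty(&pool->freelist))
		return NULL;
	page = list_first_entry(&pool->freelist, struct page, lru);
	list_del(&page->lru);
	return page;
}

static void my_pool_return(struct my_pool *pool, struct page *page)
{
	list_add(&page->lru, &pool->freelist);
}

/* new_page_t callback: hand migrate_pages() a target page from the pool. */
static struct page *my_pool_alloc(struct page *page, unsigned long private,
				  int **result)
{
	return my_pool_take((struct my_pool *)private);	/* NULL => -ENOMEM */
}

/*
 * free_page_t callback: an unused newpage must come back to the pool
 * that allocated it.  The defect fixed above: one MIGRATEPAGE_SUCCESS
 * path bypassed this callback and called putback_lru_page() on the
 * unused newpage, dropping it onto the LRU where the pool could never
 * reclaim it.
 */
static void my_pool_free(struct page *page, unsigned long private)
{
	my_pool_return((struct my_pool *)private, page);
}

static int migrate_with_pool(struct list_head *pagelist, struct my_pool *pool)
{
	return migrate_pages(pagelist, my_pool_alloc, my_pool_free,
			     (unsigned long)pool, MIGRATE_SYNC, MR_COMPACTION);
}

With put_new_page left set until __unmap_and_move() actually succeeds, every
unused newpage now flows back through my_pool_free() to its pool, instead of
being stranded on the LRU by putback_lru_page().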