Commit 051bbb34 authored by Miaohe Lin, committed by zhaoxiaoqiang11

mm/migration: return errno when isolate_huge_page failed

stable inclusion
from stable-v5.10.168
commit 97a5104d640da5867dd55243b8300a3867da90a9
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7URR4

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=97a5104d640da5867dd55243b8300a3867da90a9

----------------------------------------------------

[ Upstream commit 7ce82f4c ]

We might fail to isolate a huge page when, e.g., the page is under
migration and HPageMigratable has been cleared.  We should return an
errno in this case rather than always return 1, which could confuse the
caller: the caller might think all of the memory was migrated while the
hugetlb page is in fact left behind.  We make the prototype of
isolate_huge_page consistent with isolate_lru_page as suggested by
Huang Ying, and rename isolate_huge_page to isolate_hugetlb as
suggested by Muchun to improve readability.
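
The calling-convention change is easiest to see in a standalone sketch.
This is not the kernel code; old_isolate() and new_isolate() are
hypothetical stand-ins that model the bool-to-errno switch described
above:

#include <errno.h>
#include <stdio.h>

/* Old convention: bool-like, nonzero == isolated.  The reason for a
 * failure is lost, so the caller cannot report it. */
static int old_isolate(int migratable)
{
	return migratable;
}

/* New convention, consistent with isolate_lru_page(): 0 on success,
 * a negative errno on failure that the caller can propagate. */
static int new_isolate(int migratable)
{
	return migratable ? 0 : -EBUSY;
}

int main(void)
{
	if (!old_isolate(0))
		printf("old: failed, reason unknown\n");

	int err = new_isolate(0);
	if (err)
		printf("new: failed with errno %d\n", err);
	return 0;
}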

Link: https://lkml.kernel.org/r/20220530113016.16663-4-linmiaohe@huawei.com
Fixes: e8db67eb ("mm: migrate: move_pages() supports thp migration")
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Suggested-by: Huang Ying <ying.huang@intel.com>
Reported-by: kernel test robot <lkp@intel.com> (build error)
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Stable-dep-of: 73bdf65e ("migrate: hugetlb: check for hugetlb shared PMD in node migration")
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: zhaoxiaoqiang11 <zhaoxiaoqiang11@jd.com>
Parent 68ef2bcf
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -162,7 +162,7 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to,
 						vm_flags_t vm_flags);
 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
 						long freed);
-bool isolate_huge_page(struct page *page, struct list_head *list);
+int isolate_hugetlb(struct page *page, struct list_head *list);
 void putback_active_hugepage(struct page *page);
 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
 void free_huge_page(struct page *page);
@@ -348,9 +348,9 @@ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
 	return NULL;
 }
 
-static inline bool isolate_huge_page(struct page *page, struct list_head *list)
+static inline int isolate_hugetlb(struct page *page, struct list_head *list)
 {
-	return false;
+	return -EBUSY;
 }
 
 static inline void putback_active_hugepage(struct page *page)
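
The second hugetlb.h hunk keeps !CONFIG_HUGETLB_PAGE builds consistent:
the inline stub now reports -EBUSY instead of false, so every call site
handles failure the same way whether or not hugetlb support is compiled
in.  A condensed sketch of that header pattern, with forward
declarations added so it stands alone:

#include <errno.h>

struct page;
struct list_head;

#ifdef CONFIG_HUGETLB_PAGE
int isolate_hugetlb(struct page *page, struct list_head *list);
#else
/* hugetlb compiled out: isolation can never succeed, and -EBUSY keeps
 * the callers' error paths uniform. */
static inline int isolate_hugetlb(struct page *page, struct list_head *list)
{
	return -EBUSY;
}
#endif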
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1718,7 +1718,7 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 		 */
 		if (is_migrate_cma_page(head)) {
 			if (PageHuge(head)) {
-				if (!isolate_huge_page(head, &cma_page_list))
+				if (isolate_hugetlb(head, &cma_page_list))
 					isolation_error_count++;
 			} else {
 				if (!PageLRU(head) && drain_allow) {
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6154,15 +6154,15 @@ follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int fla
 	return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
 }
 
-bool isolate_huge_page(struct page *page, struct list_head *list)
+int isolate_hugetlb(struct page *page, struct list_head *list)
 {
-	bool ret = true;
+	int ret = 0;
 
 	spin_lock_irq(&hugetlb_lock);
 	if (!PageHeadHuge(page) ||
 	    !HPageMigratable(page) ||
 	    !get_page_unless_zero(page)) {
-		ret = false;
+		ret = -EBUSY;
 		goto unlock;
 	}
 	ClearHPageMigratable(page);
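
isolate_hugetlb() performs all of its checks and the state change under
hugetlb_lock, so a page cannot lose HPageMigratable between the test
and ClearHPageMigratable().  Below is a hypothetical userspace model of
that check-under-lock pattern, not the kernel implementation
(get_page_unless_zero() is approximated by a plain refcount test and
increment):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct fake_page {
	bool head;		/* models PageHeadHuge() */
	bool migratable;	/* models HPageMigratable */
	int refcount;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int model_isolate(struct fake_page *p)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (!p->head || !p->migratable || p->refcount == 0) {
		ret = -EBUSY;	/* caller learns why, not just "false" */
		goto unlock;
	}
	p->refcount++;		/* approximates get_page_unless_zero() */
	p->migratable = false;	/* approximates ClearHPageMigratable() */
unlock:
	pthread_mutex_unlock(&lock);
	return ret;
}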
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1991,7 +1991,7 @@ static bool isolate_page(struct page *page, struct list_head *pagelist)
 	bool lru = PageLRU(page);
 
 	if (PageHuge(page)) {
-		isolated = isolate_huge_page(page, pagelist);
+		isolated = !isolate_hugetlb(page, pagelist);
 	} else {
 		if (lru)
 			isolated = !isolate_lru_page(page);
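
Callers that only need a yes/no answer, like isolate_page() here, fold
the errno back into their existing local bool with a single negation:

	isolated = !isolate_hugetlb(page, pagelist);	/* true == queued */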
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1178,7 +1178,7 @@ int do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 
 		if (PageHuge(page)) {
 			pfn = page_to_pfn(head) + compound_nr(head) - 1;
-			isolate_huge_page(head, &source);
+			isolate_hugetlb(head, &source);
 			continue;
 		} else if (PageTransHuge(page))
 			pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -663,7 +663,7 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
 	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
 	if (flags & (MPOL_MF_MOVE_ALL) ||
 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
-		if (!isolate_huge_page(page, qp->pagelist) &&
+		if (isolate_hugetlb(page, qp->pagelist) &&
 		    (flags & MPOL_MF_STRICT))
 			/*
 			 * Failed to isolate page but allow migrating pages
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -164,7 +164,7 @@ void putback_movable_page(struct page *page)
  *
  * This function shall be used whenever the isolated pageset has been
  * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
- * and isolate_huge_page().
+ * and isolate_hugetlb().
  */
 void putback_movable_pages(struct list_head *l)
 {
@@ -1652,8 +1652,9 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
 
 	if (PageHuge(page)) {
 		if (PageHead(page)) {
-			isolate_huge_page(page, pagelist);
-			err = 1;
+			err = isolate_hugetlb(page, pagelist);
+			if (!err)
+				err = 1;
 		}
 	} else {
 		struct page *head;
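
This last hunk is the user-visible part of the fix:
add_page_for_migration() used to set err to 1 ("queued")
unconditionally, so a failed hugetlb isolation was reported to
move_pages(2) as success.  Now the errno from isolate_hugetlb() is
returned instead, and only a successful isolation is mapped to the
positive status 1.  A condensed view of the new flow, not the full
function:

	err = isolate_hugetlb(page, pagelist);
	if (!err)
		err = 1;	/* positive 1 == page queued for migration */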