提交 fea85cff 编写于 作者: J Joonsoo Kim 提交者: Linus Torvalds

mm/page_isolation.c: return last tested pfn rather than failure indicator

This is preparation step to report test failed pfn in new tracepoint to
analyze cma allocation failure problem.  There is no functional change
in this patch.
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: Minchan Kim <minchan@kernel.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 4a8c7bb5
...@@ -212,7 +212,7 @@ int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, ...@@ -212,7 +212,7 @@ int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
* *
* Returns 1 if all pages in the range are isolated. * Returns 1 if all pages in the range are isolated.
*/ */
static int static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn, __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
bool skip_hwpoisoned_pages) bool skip_hwpoisoned_pages)
{ {
...@@ -237,9 +237,8 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn, ...@@ -237,9 +237,8 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
else else
break; break;
} }
if (pfn < end_pfn)
return 0; return pfn;
return 1;
} }
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
...@@ -248,7 +247,6 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, ...@@ -248,7 +247,6 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
unsigned long pfn, flags; unsigned long pfn, flags;
struct page *page; struct page *page;
struct zone *zone; struct zone *zone;
int ret;
/* /*
* Note: pageblock_nr_pages != MAX_ORDER. Then, chunks of free pages * Note: pageblock_nr_pages != MAX_ORDER. Then, chunks of free pages
...@@ -266,10 +264,11 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, ...@@ -266,10 +264,11 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
/* Check all pages are free or marked as ISOLATED */ /* Check all pages are free or marked as ISOLATED */
zone = page_zone(page); zone = page_zone(page);
spin_lock_irqsave(&zone->lock, flags); spin_lock_irqsave(&zone->lock, flags);
ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn, pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
skip_hwpoisoned_pages); skip_hwpoisoned_pages);
spin_unlock_irqrestore(&zone->lock, flags); spin_unlock_irqrestore(&zone->lock, flags);
return ret ? 0 : -EBUSY;
return pfn < end_pfn ? -EBUSY : 0;
} }
struct page *alloc_migrate_target(struct page *page, unsigned long private, struct page *alloc_migrate_target(struct page *page, unsigned long private,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册