提交 80aa4777 编写于 作者: A Alex Shi 提交者: Caspar Zhang

mm/page_isolation.c: convert SKIP_HWPOISON to MEMORY_OFFLINE

task #29077503
commit 756d25be457fc5497da0ceee0f3d0c9eb4d8535d upstream
We have two types of users of page isolation:

 1. Memory offlining:  Offline memory so it can be unplugged. Memory
                       won't be touched.

 2. Memory allocation: Allocate memory (e.g., alloc_contig_range()) to
                       become the owner of the memory and make use of
                       it.

For example, in case we want to offline memory, we can ignore (skip
over) PageHWPoison() pages, as the memory won't get used.  We can allow
to offline memory.  In contrast, we don't want to allow to allocate such
memory.

Let's generalize the approach so we can special case other types of
pages we want to skip over in case we offline memory.  While at it, also
pass the same flags to test_pages_isolated().
Original-by: David Hildenbrand <david@redhat.com>
Link: http://lkml.kernel.org/r/20191021172353.3056-3-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Suggested-by: Michal Hocko <mhocko@suse.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Pingfan Liu <kernelfans@gmail.com>
Cc: Qian Cai <cai@lca.pw>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Pavel Tatashin <pavel.tatashin@microsoft.com>
Cc: Wei Yang <richard.weiyang@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit 756d25be457fc5497da0ceee0f3d0c9eb4d8535d)
Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
Reviewed-by: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>

Conflicts:
	reenable patch context on all files.
上级 5316eb6e
...@@ -30,7 +30,7 @@ static inline bool is_migrate_isolate(int migratetype) ...@@ -30,7 +30,7 @@ static inline bool is_migrate_isolate(int migratetype)
} }
#endif #endif
#define SKIP_HWPOISON 0x1 #define MEMORY_OFFLINE 0x1
#define REPORT_FAILURE 0x2 #define REPORT_FAILURE 0x2
bool has_unmovable_pages(struct zone *zone, struct page *page, int count, bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
...@@ -68,7 +68,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, ...@@ -68,7 +68,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
* Test all pages in [start_pfn, end_pfn) are isolated or not. * Test all pages in [start_pfn, end_pfn) are isolated or not.
*/ */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
bool skip_hwpoisoned_pages); int isol_flags);
struct page *alloc_migrate_target(struct page *page, unsigned long private); struct page *alloc_migrate_target(struct page *page, unsigned long private);
......
...@@ -1239,7 +1239,7 @@ static bool is_pageblock_removable_nolock(unsigned long pfn) ...@@ -1239,7 +1239,7 @@ static bool is_pageblock_removable_nolock(unsigned long pfn)
if (!zone_spans_pfn(zone, pfn)) if (!zone_spans_pfn(zone, pfn))
return false; return false;
return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, SKIP_HWPOISON); return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, MEMORY_OFFLINE);
} }
/* Checks if this range of memory is likely to be hot-removable. */ /* Checks if this range of memory is likely to be hot-removable. */
...@@ -1482,7 +1482,7 @@ check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages, ...@@ -1482,7 +1482,7 @@ check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
{ {
int ret; int ret;
long offlined = *(long *)data; long offlined = *(long *)data;
ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true); ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, MEMORY_OFFLINE);
offlined = nr_pages; offlined = nr_pages;
if (!ret) if (!ret)
*(long *)data += offlined; *(long *)data += offlined;
...@@ -1638,7 +1638,7 @@ static int __ref __offline_pages(unsigned long start_pfn, ...@@ -1638,7 +1638,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
/* set above range as isolated */ /* set above range as isolated */
ret = start_isolate_page_range(start_pfn, end_pfn, ret = start_isolate_page_range(start_pfn, end_pfn,
MIGRATE_MOVABLE, MIGRATE_MOVABLE,
SKIP_HWPOISON | REPORT_FAILURE); MEMORY_OFFLINE | REPORT_FAILURE);
if (ret) { if (ret) {
mem_hotplug_done(); mem_hotplug_done();
return ret; return ret;
......
...@@ -8181,7 +8181,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, ...@@ -8181,7 +8181,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
* The HWPoisoned page may be not in buddy system, and * The HWPoisoned page may be not in buddy system, and
* page_count() is not 0. * page_count() is not 0.
*/ */
if ((flags & SKIP_HWPOISON) && PageHWPoison(page)) if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
continue; continue;
if (__PageMovable(page)) if (__PageMovable(page))
...@@ -8397,7 +8397,7 @@ int alloc_contig_range(unsigned long start, unsigned long end, ...@@ -8397,7 +8397,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
} }
/* Make sure the range is really isolated. */ /* Make sure the range is really isolated. */
if (test_pages_isolated(outer_start, end, false)) { if (test_pages_isolated(outer_start, end, 0)) {
pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n", pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
__func__, outer_start, end); __func__, outer_start, end);
ret = -EBUSY; ret = -EBUSY;
......
...@@ -245,7 +245,7 @@ int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, ...@@ -245,7 +245,7 @@ int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
*/ */
static unsigned long static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn, __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
bool skip_hwpoisoned_pages) int flags)
{ {
struct page *page; struct page *page;
...@@ -262,7 +262,7 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn, ...@@ -262,7 +262,7 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
* simple way to verify that as VM_BUG_ON(), though. * simple way to verify that as VM_BUG_ON(), though.
*/ */
pfn += 1 << page_order(page); pfn += 1 << page_order(page);
else if (skip_hwpoisoned_pages && PageHWPoison(page)) else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
/* A HWPoisoned page cannot be also PageBuddy */ /* A HWPoisoned page cannot be also PageBuddy */
pfn++; pfn++;
else else
...@@ -274,7 +274,7 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn, ...@@ -274,7 +274,7 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
/* Caller should ensure that requested range is in a single zone */ /* Caller should ensure that requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
bool skip_hwpoisoned_pages) int isol_flags)
{ {
unsigned long pfn, flags; unsigned long pfn, flags;
struct page *page; struct page *page;
...@@ -296,8 +296,7 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, ...@@ -296,8 +296,7 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
/* Check all pages are free or marked as ISOLATED */ /* Check all pages are free or marked as ISOLATED */
zone = page_zone(page); zone = page_zone(page);
spin_lock_irqsave(&zone->lock, flags); spin_lock_irqsave(&zone->lock, flags);
pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags);
skip_hwpoisoned_pages);
spin_unlock_irqrestore(&zone->lock, flags); spin_unlock_irqrestore(&zone->lock, flags);
trace_test_pages_isolated(start_pfn, end_pfn, pfn); trace_test_pages_isolated(start_pfn, end_pfn, pfn);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册