Commit 9bb5a391 authored by Michal Hocko, committed by Linus Torvalds

mm, memory_hotplug: fix memmap initialization

Bharata has noticed that onlining newly added memory doesn't increase
the total memory, pointing to commit f7f99100 ("mm: stop zeroing
memory during allocation in vmemmap") as the culprit.  That commit
changed how the memory for memmaps is initialized, moving the zeroing
from allocation time to initialization time.  This works properly for
the early memmap init path.
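
For context, mm_zero_struct_page boils down to wiping the entire
struct page.  A minimal userspace approximation of the idea (the
kernel's real helper is essentially an arch-optimized memset of
struct page; the layout below is a stand-in, not the real one):

	#include <string.h>

	/* Toy stand-in for the kernel's struct page. */
	struct page {
		unsigned long flags;
		void *mapping;
		unsigned long private;
	};

	/* Approximation of mm_zero_struct_page(): clear every field,
	 * including any flag bits (such as PG_reserved) set earlier. */
	static inline void mm_zero_struct_page(struct page *page)
	{
		memset(page, 0, sizeof(*page));
	}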

It doesn't work for memory hotplug, though, because there the page
must be marked reserved when the sparsemem section is created and only
initialized completely later, during onlining.  memmap_init_zone is
called in the early stage of onlining.  With the current code it calls
__init_single_page, which zeroes the whole struct page and thereby
clears the reserved flag, so online_pages_range skips those pages.
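
To see why the zeroing loses the pages, here is a small standalone
model of the failure (toy types and a simplified gate, not kernel
code; the real check lives in online_pages_range):

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	#define PG_reserved (1UL << 0)	/* toy flag bit */

	struct page { unsigned long flags; };

	static bool PageReserved(const struct page *page)
	{
		return page->flags & PG_reserved;
	}

	/* What __init_single_page() effectively did before the fix:
	 * wipe the struct page, losing the PG_reserved bit set when
	 * the sparsemem section was created. */
	static void init_single_page(struct page *page)
	{
		memset(page, 0, sizeof(*page));
	}

	int main(void)
	{
		struct page page = { .flags = PG_reserved };	/* hot-added */

		init_single_page(&page);	/* early stage of onlining */

		/* online_pages_range()-style gate: the zeroed page no
		 * longer looks reserved, so it is skipped and never
		 * counted into the total memory. */
		printf("onlined: %s\n",
		       PageReserved(&page) ? "yes" : "no (the regression)");
		return 0;
	}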

Fix this by skipping mm_zero_struct_page in __init_single_page for the
memory hotplug path.  This is quite ugly, but unifying the early init
and memory hotplug init paths is a large project.  Make sure we at
least plug the regression.
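
In the same toy model, the shape of the fix in the diff below: the
zeroing becomes opt-in, and the hotplug context opts out because
sparse_add_one_section has already cleared the memmap (names mirror
the kernel; the types are stand-ins):

	#include <stdbool.h>
	#include <string.h>

	enum memmap_context { MEMMAP_EARLY, MEMMAP_HOTPLUG };

	struct page { unsigned long flags; };

	/* Post-fix shape of __init_single_page(): the caller decides
	 * whether to zero; links, counts etc. are still set up either
	 * way in the real function. */
	static void init_single_page(struct page *page, bool zero)
	{
		if (zero)
			memset(page, 0, sizeof(*page));
		/* set_page_links(), init_page_count(), ... go here */
	}

	/* Caller side, as memmap_init_zone() does it in the diff:
	 * only the early init path asks for the zeroing. */
	static void init_one(struct page *page, enum memmap_context context)
	{
		init_single_page(page, context != MEMMAP_HOTPLUG);
	}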

Link: http://lkml.kernel.org/r/20180130101141.GW21609@dhcp22.suse.cz
Fixes: f7f99100 ("mm: stop zeroing memory during allocation in vmemmap")
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reported-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
Tested-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
Reviewed-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Cc: Steven Sistare <steven.sistare@oracle.com>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Bob Picco <bob.picco@oracle.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent da391d64
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1177,9 +1177,10 @@ static void free_one_page(struct zone *zone,
 }
 
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
-				unsigned long zone, int nid)
+				unsigned long zone, int nid, bool zero)
 {
-	mm_zero_struct_page(page);
+	if (zero)
+		mm_zero_struct_page(page);
 	set_page_links(page, zone, nid, pfn);
 	init_page_count(page);
 	page_mapcount_reset(page);
@@ -1194,9 +1195,9 @@ static void __meminit __init_single_page(struct page *page, unsigned long pfn,
 }
 
 static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
-					int nid)
+					int nid, bool zero)
 {
-	return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
+	return __init_single_page(pfn_to_page(pfn), pfn, zone, nid, zero);
 }
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
@@ -1217,7 +1218,7 @@ static void __meminit init_reserved_page(unsigned long pfn)
 		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
 			break;
 	}
-	__init_single_pfn(pfn, zid, nid);
+	__init_single_pfn(pfn, zid, nid, true);
 }
 #else
 static inline void init_reserved_page(unsigned long pfn)
@@ -1534,7 +1535,7 @@ static unsigned long __init deferred_init_pages(int nid, int zid,
 		} else {
 			page++;
 		}
-		__init_single_page(page, pfn, zid, nid);
+		__init_single_page(page, pfn, zid, nid, true);
 		nr_pages++;
 	}
 	return (nr_pages);
@@ -5399,15 +5400,20 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		 * can be created for invalid pages (for alignment)
 		 * check here not to call set_pageblock_migratetype() against
 		 * pfn out of zone.
+		 *
+		 * Please note that MEMMAP_HOTPLUG path doesn't clear memmap
+		 * because this is done early in sparse_add_one_section
 		 */
 		if (!(pfn & (pageblock_nr_pages - 1))) {
 			struct page *page = pfn_to_page(pfn);
 
-			__init_single_page(page, pfn, zone, nid);
+			__init_single_page(page, pfn, zone, nid,
+					   context != MEMMAP_HOTPLUG);
 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 			cond_resched();
 		} else {
-			__init_single_pfn(pfn, zone, nid);
+			__init_single_pfn(pfn, zone, nid,
+					  context != MEMMAP_HOTPLUG);
 		}
 	}
 }