Commit b92df1de authored by Paul Burton, committed by Linus Torvalds

mm: page_alloc: skip over regions of invalid pfns where possible

When using a sparse memory model, memmap_init_zone() invoked with the
MEMMAP_EARLY context will skip over pages which aren't valid - ie.
which aren't in a populated region of the sparse memory map.  However if
the memory map is extremely sparse then it can spend a long time
linearly checking each PFN in a large non-populated region of the memory
map & skipping it in turn.

When CONFIG_HAVE_MEMBLOCK_NODE_MAP is enabled, we have sufficient
information to quickly discover the next valid PFN given an invalid one
by searching through the list of memory regions & skipping forwards to
the first PFN covered by the memory region to the right of the
non-populated region.  Implement this in order to speed up
memmap_init_zone() for systems with extremely sparse memory maps.
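
As an aside (not part of the patch), the idea can be sketched as a small
standalone C program: a binary search over a sorted array of (base, size)
regions standing in for memblock.memory.  The names struct region,
next_valid_pfn() and min_ul() are invented for this illustration, and an
explicit bounds check is added for the case where no region lies to the
right of the hole:

#include <stdio.h>

#define PAGE_SHIFT 12UL

/* Toy stand-in for a sorted, non-overlapping memblock region list. */
struct region {
	unsigned long base;	/* physical base address */
	unsigned long size;	/* size in bytes */
};

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/*
 * Given an invalid pfn, binary-search the sorted region list and return the
 * first pfn at or after (pfn + 1) that lies inside a region, clamped to
 * max_pfn.  Mirrors the shape of memblock_next_valid_pfn() in the patch,
 * but over the toy array above.
 */
static unsigned long next_valid_pfn(const struct region *regions,
				    unsigned int cnt, unsigned long pfn,
				    unsigned long max_pfn)
{
	unsigned int left = 0, right = cnt, mid;
	unsigned long addr = (pfn + 1) << PAGE_SHIFT;

	do {
		mid = (left + right) / 2;

		if (addr < regions[mid].base)
			right = mid;		/* search the left half */
		else if (addr >= regions[mid].base + regions[mid].size)
			left = mid + 1;		/* search the right half */
		else
			return min_ul(pfn + 1, max_pfn);	/* pfn + 1 is valid */
	} while (left < right);

	/* addr fell in a hole: skip to the region on its right, if any */
	if (right == cnt)
		return max_pfn;
	return min_ul(regions[right].base >> PAGE_SHIFT, max_pfn);
}

int main(void)
{
	/* two populated regions separated by a large hole */
	const struct region map[] = {
		{ .base = 0x00000000UL, .size = 0x00200000UL },	/* pfns 0x0 - 0x1ff */
		{ .base = 0x80000000UL, .size = 0x00200000UL },	/* pfns 0x80000 - 0x801ff */
	};

	/* pfn 0x200 is invalid; jump straight to pfn 0x80000 */
	printf("next valid pfn after 0x200: 0x%lx\n",
	       next_valid_pfn(map, 2, 0x200UL, 0x100000UL));
	return 0;
}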

James said, "I have tested this patch on a virtual model of a Samurai CPU
with a sparse memory map.  The kernel boot time drops from 109 to
62 seconds."

Link: http://lkml.kernel.org/r/20161125185518.29885-1-paul.burton@imgtec.com
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Tested-by: James Hartley <james.hartley@imgtec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 7f354a54
@@ -203,6 +203,7 @@ int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
 			    unsigned long *end_pfn);
 void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
 			  unsigned long *out_end_pfn, int *out_nid);
+unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn);
 
 /**
  * for_each_mem_pfn_range - early memory pfn range iterator

@@ -1105,6 +1105,31 @@ void __init_memblock __next_mem_pfn_range(int *idx, int nid,
 		*out_nid = r->nid;
 }
 
+unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
+						      unsigned long max_pfn)
+{
+	struct memblock_type *type = &memblock.memory;
+	unsigned int right = type->cnt;
+	unsigned int mid, left = 0;
+	phys_addr_t addr = PFN_PHYS(pfn + 1);
+
+	do {
+		mid = (right + left) / 2;
+
+		if (addr < type->regions[mid].base)
+			right = mid;
+		else if (addr >= (type->regions[mid].base +
+				  type->regions[mid].size))
+			left = mid + 1;
+		else {
+			/* addr is within the region, so pfn + 1 is valid */
+			return min(pfn + 1, max_pfn);
+		}
+	} while (left < right);
+
+	return min(PHYS_PFN(type->regions[right].base), max_pfn);
+}
+
 /**
  * memblock_set_node - set node ID on memblock regions
  * @base: base of area to set node ID for

@@ -5103,8 +5103,17 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		if (context != MEMMAP_EARLY)
 			goto not_early;
 
-		if (!early_pfn_valid(pfn))
+		if (!early_pfn_valid(pfn)) {
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+			/*
+			 * Skip to the pfn preceding the next valid one (or
+			 * end_pfn), such that we hit a valid pfn (or end_pfn)
+			 * on our next iteration of the loop.
+			 */
+			pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1;
+#endif
 			continue;
+		}
 		if (!early_pfn_in_nid(pfn, nid))
 			continue;
 		if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
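
For completeness, here is a standalone sketch (again not kernel code) of the
caller-side pattern added to memmap_init_zone() above: on an invalid pfn the
loop steps to the pfn preceding the next valid one, so that its own pfn++
lands exactly on the valid pfn (or on end_pfn, ending the loop).
pfn_valid_stub() and next_valid_pfn_stub() are hypothetical stand-ins for
early_pfn_valid() and memblock_next_valid_pfn():

#include <stdio.h>
#include <stdbool.h>

/*
 * Hypothetical stand-ins: pfns 0x0-0x1ff and 0x80000-0x801ff are "populated",
 * everything in between is a hole whose next valid pfn is 0x80000.
 */
static bool pfn_valid_stub(unsigned long pfn)
{
	return pfn < 0x200 || (pfn >= 0x80000 && pfn < 0x80200);
}

static unsigned long next_valid_pfn_stub(unsigned long pfn, unsigned long max_pfn)
{
	unsigned long next;

	if (pfn_valid_stub(pfn + 1))
		next = pfn + 1;		/* pfn + 1 is already valid */
	else if (pfn + 1 < 0x80000)
		next = 0x80000;		/* jump over the hole */
	else
		next = max_pfn;		/* nothing valid to the right */

	return next < max_pfn ? next : max_pfn;
}

int main(void)
{
	unsigned long pfn, end_pfn = 0x80200, iterations = 0, initialised = 0;

	for (pfn = 0; pfn < end_pfn; pfn++) {
		iterations++;
		if (!pfn_valid_stub(pfn)) {
			/*
			 * Same trick as the patch: step to the pfn preceding
			 * the next valid one so the loop's pfn++ lands on it
			 * (or on end_pfn, terminating the loop).
			 */
			pfn = next_valid_pfn_stub(pfn, end_pfn) - 1;
			continue;
		}
		initialised++;		/* stands in for the real page init */
	}

	/* 0x400 pfns initialised in 0x401 iterations rather than 0x80200 */
	printf("initialised 0x%lx pfns in 0x%lx iterations\n",
	       initialised, iterations);
	return 0;
}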