Commit 4d5cf86c authored by Yinghai Lu, committed by H. Peter Anvin

x86, memblock: Add get_free_all_memory_range()

get_free_all_memory_range is for CONFIG_NO_BOOTMEM=y, and will be called by
free_all_memory_core_early().

It subtracts memblock.reserved from early_node_map (aka the active ranges) to
get all free ranges, and those ranges will then be converted to slab pages.
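To make that subtract step concrete, here is a small stand-alone C sketch of the idea. toy_subtract_range() is a hypothetical illustration only; the kernel does the real work with subtract_range() and clean_sort_range() from kernel/range.c, which the patch below calls.

/*
 * Toy illustration of the subtract step (not kernel code): start from one
 * free range and remove a reserved region from the middle of it, which may
 * split the range in two.  subtract_range()/clean_sort_range() operate on a
 * fixed-size array in the same spirit.
 */
#include <stdio.h>
#include <stdint.h>

struct range {
        uint64_t start;
        uint64_t end;
};

/* Remove [start, end) from every entry of the array, splitting if needed. */
static int toy_subtract_range(struct range *r, int az, int nr,
                              uint64_t start, uint64_t end)
{
        int i;

        for (i = 0; i < nr; i++) {
                if (end <= r[i].start || start >= r[i].end)
                        continue;                       /* no overlap */
                if (start <= r[i].start && end >= r[i].end) {
                        r[i].start = r[i].end = 0;      /* fully covered: drop */
                        continue;
                }
                if (start > r[i].start && end < r[i].end && nr < az) {
                        /* reserved region sits in the middle: split in two */
                        r[nr].start = end;
                        r[nr].end = r[i].end;
                        nr++;
                        r[i].end = start;
                        continue;
                }
                if (start <= r[i].start)
                        r[i].start = end;               /* clip the front */
                else
                        r[i].end = start;               /* clip the tail */
        }
        return nr;
}

int main(void)
{
        struct range free_ranges[4] = { { 0x100, 0x900 } };
        int nr = 1;

        /* Subtract a "reserved" block sitting inside the free range. */
        nr = toy_subtract_range(free_ranges, 4, nr, 0x400, 0x500);

        for (int i = 0; i < nr; i++)
                if (free_ranges[i].start != free_ranges[i].end)
                        printf("[%#llx-%#llx)\n",
                               (unsigned long long)free_ranges[i].start,
                               (unsigned long long)free_ranges[i].end);
        return 0;
}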

-v4: increase range size
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Cc: Jan Beulich <jbeulich@novell.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Parent 9dc5d569
@@ -8,5 +8,7 @@ void memblock_x86_to_bootmem(u64 start, u64 end);
void memblock_x86_reserve_range(u64 start, u64 end, char *name);
void memblock_x86_free_range(u64 start, u64 end);

struct range;
int get_free_all_memory_range(struct range **rangep, int nodeid);

#endif
@@ -86,7 +86,103 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
        return MEMBLOCK_ERROR;
}
static __init struct range *find_range_array(int count)
{
        u64 end, size, mem;
        struct range *range;

        size = sizeof(struct range) * count;
        end = memblock.current_limit;

        mem = memblock_find_in_range(0, end, size, sizeof(struct range));
        if (mem == MEMBLOCK_ERROR)
                panic("can not find more space for range array");

        /*
         * This range is temporary, so don't reserve it; it will not be
         * overlapped because we will not allocate a new buffer before
         * we discard this one.
         */
        range = __va(mem);
        memset(range, 0, size);

        return range;
}
#ifdef CONFIG_NO_BOOTMEM
static void __init memblock_x86_subtract_reserved(struct range *range, int az)
{
        u64 final_start, final_end;
        struct memblock_region *r;

        /* Take out the region array itself first */
        memblock_free_reserved_regions();

        pr_info("Subtract (%ld early reservations)\n", memblock.reserved.cnt);

        for_each_memblock(reserved, r) {
                pr_info(" [%010llx-%010llx]\n", (u64)r->base, (u64)r->base + r->size - 1);
                final_start = PFN_DOWN(r->base);
                final_end = PFN_UP(r->base + r->size);
                if (final_start >= final_end)
                        continue;
                subtract_range(range, az, final_start, final_end);
        }

        /* Put the region array back? */
        memblock_reserve_reserved_regions();
}
struct count_data {
        int nr;
};

static int __init count_work_fn(unsigned long start_pfn,
                                unsigned long end_pfn, void *datax)
{
        struct count_data *data = datax;

        data->nr++;

        return 0;
}

static int __init count_early_node_map(int nodeid)
{
        struct count_data data;

        data.nr = 0;
        work_with_active_regions(nodeid, count_work_fn, &data);

        return data.nr;
}
int __init get_free_all_memory_range(struct range **rangep, int nodeid)
{
        int count;
        struct range *range;
        int nr_range;

        count = (memblock.reserved.cnt + count_early_node_map(nodeid)) * 2;

        range = find_range_array(count);
        nr_range = 0;

        /*
         * Use early_node_map[] and memblock.reserved.region to build the
         * range array first
         */
        nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
#ifdef CONFIG_X86_32
        subtract_range(range, count, max_low_pfn, -1ULL);
#endif
        memblock_x86_subtract_reserved(range, count);
        nr_range = clean_sort_range(range, count);

        *rangep = range;

        return nr_range;
}
#else
void __init memblock_x86_to_bootmem(u64 start, u64 end)
{
        int count;
...
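For context, a CONFIG_NO_BOOTMEM caller such as free_all_memory_core_early() can consume the returned array roughly as sketched below. This is an assumed shape, not the verbatim mm code; release_pfn_span() is a hypothetical stand-in for whatever helper hands a pfn range back to the page allocator.

/*
 * Hedged sketch of the caller side (assumed shape, not the real mm code).
 * Requires <linux/range.h> for struct range and the arch header patched
 * above for get_free_all_memory_range().
 */
unsigned long release_pfn_span(u64 start_pfn, u64 end_pfn);    /* hypothetical */

unsigned long __init free_all_memory_core_early(int nodeid)
{
        struct range *range = NULL;
        unsigned long count = 0;
        int nr_range, i;

        /* Active ranges of this node minus memblock.reserved. */
        nr_range = get_free_all_memory_range(&range, nodeid);

        for (i = 0; i < nr_range; i++) {
                count += range[i].end - range[i].start;
                release_pfn_span(range[i].start, range[i].end);
        }

        return count;
}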