Commit cbba6579 authored by Pekka Enberg, committed by Ingo Molnar

x86: unify kernel_physical_mapping_init() call in init_memory_mapping()

Impact: cleanup

The 64-bit version of init_memory_mapping() uses the last mapped
address returned from kernel_physical_mapping_init() whereas the
32-bit version doesn't. This patch adds relevant ifdefs to both
versions of the function to reduce the diff between them.
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <1236257708-27269-8-git-send-email-penberg@cs.helsinki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent c464573c
......@@ -929,6 +929,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
unsigned long page_size_mask = 0;
unsigned long start_pfn, end_pfn;
unsigned long pos;
unsigned long ret;
struct map_range mr[NR_RANGE_MR];
int nr_range, i;
......@@ -1040,11 +1041,18 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
if (!after_bootmem)
find_early_table_space(end, use_pse, use_gbpages);
#ifdef CONFIG_X86_32
for (i = 0; i < nr_range; i++)
kernel_physical_mapping_init(
mr[i].start >> PAGE_SHIFT,
mr[i].end >> PAGE_SHIFT,
mr[i].page_size_mask == (1<<PG_LEVEL_2M));
ret = end;
#else /* CONFIG_X86_64 */
for (i = 0; i < nr_range; i++)
ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
mr[i].page_size_mask);
#endif
early_ioremap_page_table_range_init();
......@@ -1059,7 +1067,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
if (!after_bootmem)
early_memtest(start, end);
return end >> PAGE_SHIFT;
return ret >> PAGE_SHIFT;
}
......
......@@ -686,10 +686,10 @@ static int save_mr(struct map_range *mr, int nr_range,
unsigned long __init_refok init_memory_mapping(unsigned long start,
unsigned long end)
{
unsigned long last_map_addr = 0;
unsigned long page_size_mask = 0;
unsigned long start_pfn, end_pfn;
unsigned long pos;
unsigned long ret;
struct map_range mr[NR_RANGE_MR];
int nr_range, i;
......@@ -819,10 +819,18 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
if (!after_bootmem)
find_early_table_space(end, use_pse, use_gbpages);
#ifdef CONFIG_X86_32
for (i = 0; i < nr_range; i++)
kernel_physical_mapping_init(
mr[i].start >> PAGE_SHIFT,
mr[i].end >> PAGE_SHIFT,
mr[i].page_size_mask == (1<<PG_LEVEL_2M));
ret = end;
#else /* CONFIG_X86_64 */
for (i = 0; i < nr_range; i++)
last_map_addr = kernel_physical_mapping_init(
mr[i].start, mr[i].end,
mr[i].page_size_mask);
ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
mr[i].page_size_mask);
#endif
if (!after_bootmem)
mmu_cr4_features = read_cr4();
......@@ -832,13 +840,10 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
reserve_early(table_start << PAGE_SHIFT,
table_end << PAGE_SHIFT, "PGTABLE");
printk(KERN_INFO "last_map_addr: %lx end: %lx\n",
last_map_addr, end);
if (!after_bootmem)
early_memtest(start, end);
return last_map_addr >> PAGE_SHIFT;
return ret >> PAGE_SHIFT;
}
#ifndef CONFIG_NUMA
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册