Commit 7482b0e9 authored by Yinghai Lu, committed by Ingo Molnar

x86: fix init_memory_mapping over boundary v3

Some RAM end boundaries only have page alignment instead of 2M alignment.

v2: make init_memory_mapping more robust: start can be any value, not just 0
v3: fix the non-PAE case by handling the leftover range in kernel_physical_mapping_init()
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent df366e98
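The core of the change to kernel_physical_mapping_init() is the extra boundary check on the PSE path: a large page is only used while a whole PTRS_PER_PTE-sized chunk still fits below limit_pfn, and the page-aligned tail falls through to the 4K loop. Below is a minimal user-space sketch of just that split logic; the example addresses, counters, and constants are illustrative only (PTRS_PER_PTE = 512 matches the PAE/2M case, non-PAE would be 1024/4M), and the kernel-text and cpu_has_pse checks from the real function are omitted.

#include <stdio.h>

#define PAGE_SHIFT   12
#define PTRS_PER_PTE 512UL   /* PAE: 512 PTEs per table -> 2M large pages */

int main(void)
{
	unsigned long start = 0;          /* physical start, page aligned */
	unsigned long end = 0x7ff0000UL;  /* 128M - 64K: page aligned, but not 2M aligned */
	unsigned long pfn = start >> PAGE_SHIFT;
	unsigned long limit_pfn = end >> PAGE_SHIFT;
	unsigned long pages_2m = 0, pages_4k = 0;

	while (pfn < limit_pfn) {
		if (pfn + PTRS_PER_PTE <= limit_pfn) {
			/* whole 2M chunk fits below the limit: use a large page */
			pfn += PTRS_PER_PTE;
			pages_2m++;
		} else {
			/* page-aligned tail: fall back to 4K pages */
			pfn++;
			pages_4k++;
		}
	}
	printf("%lu large pages + %lu 4K pages\n", pages_2m, pages_4k);
	return 0;
}

With an end that is only page aligned, the last partial 2M chunk is covered by 4K mappings (here 63 large pages plus 496 small ones) instead of a large page that would run past end.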
@@ -195,7 +195,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
 	unsigned pages_2m = 0, pages_4k = 0;
 	unsigned limit_pfn = end >> PAGE_SHIFT;
 
-	pgd_idx = pgd_index(PAGE_OFFSET);
+	pgd_idx = pgd_index(start + PAGE_OFFSET);
 	pgd = pgd_base + pgd_idx;
 	pfn = start >> PAGE_SHIFT;
@@ -218,7 +218,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
 			 * and overlapping MTRRs into large pages can cause
 			 * slowdowns.
 			 */
-			if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) {
+			if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0) &&
+			    (pfn + PTRS_PER_PTE) <= limit_pfn) {
 				unsigned int addr2;
 				pgprot_t prot = PAGE_KERNEL_LARGE;
@@ -233,13 +234,12 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
 				set_pmd(pmd, pfn_pmd(pfn, prot));
 
 				pfn += PTRS_PER_PTE;
-				max_pfn_mapped = pfn;
 				continue;
 			}
 			pte = one_page_table_init(pmd);
 
 			for (pte_ofs = 0;
-			     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
+			     pte_ofs < PTRS_PER_PTE && pfn < limit_pfn;
 			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
 				pgprot_t prot = PAGE_KERNEL;
@@ -249,7 +249,6 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
 				pages_4k++;
 				set_pte(pte, pfn_pte(pfn, prot));
 			}
-			max_pfn_mapped = pfn;
 		}
 	}
 	update_page_count(PG_LEVEL_2M, pages_2m);
@@ -729,7 +728,7 @@ void __init setup_bootmem_allocator(void)
 static void __init find_early_table_space(unsigned long end)
 {
-	unsigned long puds, pmds, tables, start;
+	unsigned long puds, pmds, ptes, tables, start;
 
 	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
 	tables = PAGE_ALIGN(puds * sizeof(pud_t));
@@ -737,10 +736,15 @@ static void __init find_early_table_space(unsigned long end)
 	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
 	tables += PAGE_ALIGN(pmds * sizeof(pmd_t));
 
-	if (!cpu_has_pse) {
-		int ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
-		tables += PAGE_ALIGN(ptes * sizeof(pte_t));
-	}
+	if (cpu_has_pse) {
+		unsigned long extra;
+		extra = end - ((end>>21) << 21);
+		extra += (2UL<<20);
+		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	} else
+		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	tables += PAGE_ALIGN(ptes * sizeof(pte_t));
 
 	/*
 	 * RED-PEN putting page tables only on node 0 could
......
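The reworked find_early_table_space() sizes the PTE reservation for the PSE case from the leftover alone: end - ((end>>21) << 21) is whatever remains past the last 2M boundary, and the extra 2UL<<20 adds one more 2M of slack, presumably as headroom for an unaligned start. Here is a hedged arithmetic sketch of that estimate with an illustrative end value; sizeof(unsigned long) merely stands in for the kernel's sizeof(pte_t), which depends on PAE.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
/* Round up to a page, like the kernel's PAGE_ALIGN() */
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long end = 0x7ff0000UL;   /* 128M - 64K: not 2M aligned */
	unsigned long extra, ptes, tables;

	/* leftover past the last 2M boundary, plus one more 2M of slack */
	extra = end - ((end >> 21) << 21);
	extra += (2UL << 20);

	/* number of 4K PTEs needed to cover that leftover */
	ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* sizeof(unsigned long) stands in for the kernel's sizeof(pte_t) */
	tables = PAGE_ALIGN(ptes * sizeof(unsigned long));

	printf("extra=0x%lx  ptes=%lu  tables=%lu bytes\n", extra, ptes, tables);
	return 0;
}

For this end, extra comes out to 0x3f0000, i.e. 1008 PTEs, which PAGE_ALIGN rounds up to one or two pages of early table space depending on sizeof(pte_t): far less than the full-range estimate the old !cpu_has_pse branch made.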