Commit 6ad91658 authored by Keith Mannthey, committed by Andi Kleen

[PATCH] x86_64 kernel mapping fix

Fix for the x86_64 kernel mapping code.  Without this patch the update path
only inits one pmd_page's worth of memory and tramples any existing entries
on it.  Now the calling convention for phys_pmd_init and phys_pud_init is to
always pass a [pmd/pud] page, not an offset within a page.

Signed-off-by: Keith Mannthey <kmannth@us.ibm.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
parent abf0f109
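To make the convention change described above concrete, here is a minimal user-space sketch (not kernel code; fake_pmd_init, fake_pmd_index, and FAKE_PRESENT are hypothetical stand-ins for phys_pmd_init, pmd_index, and set_pmd). It models a pmd page as an array of 512 slots: the caller passes the whole page, the callee starts at the slot derived from the address and skips slots that are already populated, so a later update cannot trample earlier mappings.

#include <stdio.h>

#define PTRS_PER_PMD 512
#define PMD_SHIFT    21                      /* each pmd entry maps a 2 MB region */
#define PMD_SIZE     (1UL << PMD_SHIFT)
#define FAKE_PRESENT 0x1UL                   /* stand-in for a page-table present bit */

/* Slot within a pmd page for a given address, as pmd_index() does. */
static unsigned long fake_pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/* New convention: receive the whole pmd *page* and derive the starting
 * slot from the address; existing entries are left untouched. */
static void fake_pmd_init(unsigned long *pmd_page,
			  unsigned long address, unsigned long end)
{
	unsigned long i = fake_pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		if (address >= end)
			break;
		if (pmd_page[i])	/* already mapped: skip, don't trample */
			continue;
		pmd_page[i] = address | FAKE_PRESENT;	/* stand-in for set_pmd() */
	}
}

int main(void)
{
	static unsigned long pmd_page[PTRS_PER_PMD];

	/* Map the first four slots, then a later region starting mid-page.
	 * The second call begins at slot 8, not slot 0, so the first four
	 * entries survive -- the behavior the patch fixes. */
	fake_pmd_init(pmd_page, 0, 4 * PMD_SIZE);
	fake_pmd_init(pmd_page, 8 * PMD_SIZE, 12 * PMD_SIZE);

	printf("slot 0 = %#lx, slot 8 = %#lx\n", pmd_page[0], pmd_page[8]);
	return 0;
}

Under the old convention the callee always started at slot 0 of whatever pointer it was handed, which is why a hot-add update re-initialized one pmd page from its beginning and overwrote live entries.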
...@@ -250,12 +250,13 @@ __init void early_iounmap(void *addr, unsigned long size) ...@@ -250,12 +250,13 @@ __init void early_iounmap(void *addr, unsigned long size)
} }
static void __meminit static void __meminit
phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end) phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{ {
int i; int i = pmd_index(address);
for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) { for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
unsigned long entry; unsigned long entry;
pmd_t *pmd = pmd_page + pmd_index(address);
if (address >= end) { if (address >= end) {
if (!after_bootmem) if (!after_bootmem)
...@@ -263,6 +264,10 @@ phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end) ...@@ -263,6 +264,10 @@ phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
set_pmd(pmd, __pmd(0)); set_pmd(pmd, __pmd(0));
break; break;
} }
if (pmd_val(*pmd))
continue;
entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address; entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
entry &= __supported_pte_mask; entry &= __supported_pte_mask;
set_pmd(pmd, __pmd(entry)); set_pmd(pmd, __pmd(entry));
...@@ -272,45 +277,41 @@ phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end) ...@@ -272,45 +277,41 @@ phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
static void __meminit static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end) phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{ {
pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address)); pmd_t *pmd = pmd_offset(pud,0);
spin_lock(&init_mm.page_table_lock);
if (pmd_none(*pmd)) { phys_pmd_init(pmd, address, end);
spin_lock(&init_mm.page_table_lock); spin_unlock(&init_mm.page_table_lock);
phys_pmd_init(pmd, address, end); __flush_tlb_all();
spin_unlock(&init_mm.page_table_lock);
__flush_tlb_all();
}
} }
static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end) static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{ {
long i = pud_index(address); int i = pud_index(addr);
pud = pud + i;
if (after_bootmem && pud_val(*pud)) {
phys_pmd_update(pud, address, end);
return;
}
for (; i < PTRS_PER_PUD; pud++, i++) { for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE ) {
int map; int map;
unsigned long paddr, pmd_phys; unsigned long pmd_phys;
pud_t *pud = pud_page + pud_index(addr);
pmd_t *pmd; pmd_t *pmd;
paddr = (address & PGDIR_MASK) + i*PUD_SIZE; if (addr >= end)
if (paddr >= end)
break; break;
if (!after_bootmem && !e820_any_mapped(paddr, paddr+PUD_SIZE, 0)) { if (!after_bootmem && !e820_any_mapped(addr,addr+PUD_SIZE,0)) {
set_pud(pud, __pud(0)); set_pud(pud, __pud(0));
continue; continue;
} }
if (pud_val(*pud)) {
phys_pmd_update(pud, addr, end);
continue;
}
pmd = alloc_low_page(&map, &pmd_phys); pmd = alloc_low_page(&map, &pmd_phys);
spin_lock(&init_mm.page_table_lock); spin_lock(&init_mm.page_table_lock);
set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE)); set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
phys_pmd_init(pmd, paddr, end); phys_pmd_init(pmd, addr, end);
spin_unlock(&init_mm.page_table_lock); spin_unlock(&init_mm.page_table_lock);
unmap_low_page(map); unmap_low_page(map);
} }
......