Commit 06c830a4, authored by Kirill A. Shutemov, committed by Ingo Molnar

x86/power: Add 5-level paging support

set_up_temporary_text_mapping() and relocate_restore_code() require
adjustments to handle the additional page table level.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20170313143309.16020-7-kirill.shutemov@linux.intel.com
[ Minor readability edits. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent: b50858ce
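
In practical terms the patch adds one translation level: with CONFIG_X86_5LEVEL the walk becomes pgd -> p4d -> pud -> pmd -> pte instead of pgd -> pud -> pmd -> pte, so the temporary hibernation page tables need one extra directory page and one extra index step. The following stand-alone user-space model (an illustration only, not kernel code) shows how the same virtual address decodes under both modes; the shift values 48/39/30/21/12 match the kernel's PGDIR/P4D/PUD/PMD/PAGE_SHIFT, while the sample address is made up.

/*
 * User-space model of x86-64 virtual address decoding, for illustration
 * only. Each paging level consumes 9 bits of the virtual address; 5-level
 * paging (LA57) inserts the p4d level between pgd and pud.
 */
#include <stdio.h>

#define IDX(addr, shift)	(((addr) >> (shift)) & 0x1ffULL)	/* 9-bit table index */

static void decode(unsigned long long addr, int la57)
{
	if (la57)	/* 5-level: PGDIR_SHIFT == 48, p4d decodes bits 39..47 */
		printf("pgd=%llu p4d=%llu pud=%llu pmd=%llu pte=%llu\n",
		       IDX(addr, 48), IDX(addr, 39), IDX(addr, 30),
		       IDX(addr, 21), IDX(addr, 12));
	else		/* 4-level: PGDIR_SHIFT == 39, the p4d level is folded away */
		printf("pgd=%llu pud=%llu pmd=%llu pte=%llu\n",
		       IDX(addr, 39), IDX(addr, 30), IDX(addr, 21), IDX(addr, 12));
}

int main(void)
{
	/* Hypothetical kernel-text address standing in for restore_jump_address. */
	unsigned long long addr = 0xffffffff81000000ULL;

	decode(addr, 0);
	decode(addr, 1);
	return 0;
}

Compiled with a plain cc, this prints the per-level table indices; in the 4-level case the p4d index simply does not exist, which is why set_up_temporary_text_mapping() only allocates and wires a p4d page when 5-level paging is configured.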
arch/x86/power/hibernate_64.c

@@ -49,6 +49,7 @@ static int set_up_temporary_text_mapping(pgd_t *pgd)
 {
 	pmd_t *pmd;
 	pud_t *pud;
+	p4d_t *p4d;
 
 	/*
 	 * The new mapping only has to cover the page containing the image
@@ -63,6 +64,13 @@ static int set_up_temporary_text_mapping(pgd_t *pgd)
 	 * the virtual address space after switching over to the original page
 	 * tables used by the image kernel.
 	 */
+
+	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+		p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
+		if (!p4d)
+			return -ENOMEM;
+	}
+
 	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
 	if (!pud)
 		return -ENOMEM;
@@ -75,8 +83,13 @@ static int set_up_temporary_text_mapping(pgd_t *pgd)
 		__pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
 	set_pud(pud + pud_index(restore_jump_address),
 		__pud(__pa(pmd) | _KERNPG_TABLE));
-	set_pgd(pgd + pgd_index(restore_jump_address),
-		__pgd(__pa(pud) | _KERNPG_TABLE));
+	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+		set_p4d(p4d + p4d_index(restore_jump_address), __p4d(__pa(pud) | _KERNPG_TABLE));
+		set_pgd(pgd + pgd_index(restore_jump_address), __pgd(__pa(p4d) | _KERNPG_TABLE));
+	} else {
+		/* No p4d for 4-level paging: point the pgd to the pud page table */
+		set_pgd(pgd + pgd_index(restore_jump_address), __pgd(__pa(pud) | _KERNPG_TABLE));
+	}
 
 	return 0;
 }
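
Within set_up_temporary_text_mapping() the wiring follows the usual pattern: each directory entry stores the physical address of the next-level table OR'ed with _KERNPG_TABLE, and the only difference in the 5-level case is the extra pgd -> p4d hop. The snippet below is a hedged user-space model of that chaining; the physical addresses and the KERNPG_TABLE value are stand-ins chosen for the example.

/*
 * User-space model of how set_up_temporary_text_mapping() chains the
 * levels: every entry holds the physical address of the next table plus
 * permission bits. Table addresses below are invented for the example;
 * only the chaining pattern mirrors the patch.
 */
#include <stdio.h>

#define KERNPG_TABLE	0x63ULL		/* stand-in for _KERNPG_TABLE */

int main(void)
{
	/* Pretend physical addresses of the freshly allocated table pages. */
	unsigned long long pmd_pa = 0x100000, pud_pa = 0x101000, p4d_pa = 0x102000;
	unsigned long long pgd_entry, p4d_entry, pud_entry;
	int five_level = 1;	/* models IS_ENABLED(CONFIG_X86_5LEVEL) */

	pud_entry = pmd_pa | KERNPG_TABLE;		/* set_pud(): pud -> pmd table */
	if (five_level) {
		p4d_entry = pud_pa | KERNPG_TABLE;	/* set_p4d(): p4d -> pud table */
		pgd_entry = p4d_pa | KERNPG_TABLE;	/* set_pgd(): pgd -> p4d table */
	} else {
		p4d_entry = 0;				/* no p4d level to fill */
		pgd_entry = pud_pa | KERNPG_TABLE;	/* set_pgd(): pgd -> pud table */
	}

	printf("pgd entry: %#llx\n", pgd_entry);
	printf("p4d entry: %#llx\n", p4d_entry);
	printf("pud entry: %#llx\n", pud_entry);
	return 0;
}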
@@ -124,7 +137,10 @@ static int set_up_temporary_mappings(void)
 static int relocate_restore_code(void)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
 
 	relocated_restore_code = get_safe_page(GFP_ATOMIC);
 	if (!relocated_restore_code)
@@ -134,22 +150,25 @@ static int relocate_restore_code(void)
 
 	/* Make the page containing the relocated code executable */
 	pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
-	pud = pud_offset(pgd, relocated_restore_code);
+	p4d = p4d_offset(pgd, relocated_restore_code);
+	if (p4d_large(*p4d)) {
+		set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
+		goto out;
+	}
+	pud = pud_offset(p4d, relocated_restore_code);
 	if (pud_large(*pud)) {
 		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
-	} else {
-		pmd_t *pmd = pmd_offset(pud, relocated_restore_code);
-
-		if (pmd_large(*pmd)) {
-			set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
-		} else {
-			pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);
-
-			set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
-		}
+		goto out;
 	}
+	pmd = pmd_offset(pud, relocated_restore_code);
+	if (pmd_large(*pmd)) {
+		set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
+		goto out;
+	}
+	pte = pte_offset_kernel(pmd, relocated_restore_code);
+	set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
+out:
 	__flush_tlb_all();
 
 	return 0;
 }
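
The relocate_restore_code() rework flattens the old nested else branches into a top-down walk that stops at the first level mapping the page outright (a large pud/pmd entry, or ultimately the pte), clears _PAGE_NX only there, and exits through the new out: label. The following user-space sketch models just that early-exit control flow; the entry values and the maps_page_directly flag (standing in for the pXd_large() checks) are invented for the example.

/*
 * User-space model (a sketch, not kernel code) of the control flow
 * relocate_restore_code() now uses: walk down the levels and stop at the
 * first level that maps the page directly, clearing NX in that one entry.
 */
#include <stdbool.h>
#include <stdio.h>

#define NX_BIT	(1ULL << 63)

struct level {
	const char *name;
	bool maps_page_directly;	/* models pXd_large() at this level */
	unsigned long long entry;	/* models the page-table entry value */
};

static void clear_nx(struct level *levels, int nlevels)
{
	int i;

	for (i = 0; i < nlevels; i++) {
		if (levels[i].maps_page_directly || i == nlevels - 1) {
			levels[i].entry &= ~NX_BIT;	/* set_pXd(..., val & ~_PAGE_NX) */
			printf("cleared NX at %s\n", levels[i].name);
			return;		/* the kernel code reaches this via goto out */
		}
		/* otherwise descend to the next level (pXd_offset() in the kernel) */
	}
}

int main(void)
{
	struct level walk[] = {
		{ "p4d", false, NX_BIT | 0x1000 },
		{ "pud", false, NX_BIT | 0x2000 },
		{ "pmd", true,  NX_BIT | 0x3000 },	/* 2 MB mapping: stop here */
		{ "pte", false, NX_BIT | 0x4000 },
	};

	clear_nx(walk, 4);
	return 0;
}

Whichever level terminates the walk, a single __flush_tlb_all() afterwards is enough, which is why the kernel code funnels every case through the shared out: label.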