Commit a2172e25 — authored by: Huang, Ying; committed by: Ingo Molnar

x86: fix some bugs about EFI runtime code mapping

This patch fixes some bugs of making EFI runtime code executable.

- Use change_page_attr in i386 too. Because the runtime code may be
  mapped not through ioremap.

- If there is no _PAGE_NX in __supported_pte_mask, the change_page_attr
  is not called.

- Make efi_ioremap map pages as PAGE_KERNEL_EXEC_NOCACHE, because EFI runtime
  code may be mapped through efi_ioremap.
Signed-off-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Parent commit: cd582896
...@@ -40,6 +40,8 @@ ...@@ -40,6 +40,8 @@
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/efi.h> #include <asm/efi.h>
#include <asm/time.h> #include <asm/time.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#define EFI_DEBUG 1 #define EFI_DEBUG 1
#define PFX "EFI: " #define PFX "EFI: "
...@@ -379,6 +381,32 @@ void __init efi_init(void) ...@@ -379,6 +381,32 @@ void __init efi_init(void)
#endif #endif
} }
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
/*
 * Clear the NX bit on the EFI runtime-service code regions so the
 * firmware code can actually be executed after the kernel has mapped
 * it.  Only meaningful when the CPU/kernel support the NX bit at all
 * (x86-64, or i386 with PAE), hence the #if guard; the !NX stub below
 * is used otherwise.
 */
static void __init runtime_code_page_mkexec(void)
{
efi_memory_desc_t *md;
unsigned long end;
void *p;
/*
 * If NX is not enabled in __supported_pte_mask, pages are already
 * executable and there is nothing to change.
 */
if (!(__supported_pte_mask & _PAGE_NX))
return;
/* Make EFI runtime service code area executable */
/* Walk the EFI memory map one descriptor at a time. */
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
/* One-past-the-end physical address of this region. */
end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
/*
 * Only touch runtime-service CODE regions that lie within the
 * directly mapped range (<= end_pfn_map); regions above that
 * were presumably mapped via efi_ioremap with an executable
 * protection instead -- see the efi_ioremap change in this
 * patch.
 */
if (md->type == EFI_RUNTIME_SERVICES_CODE &&
(end >> PAGE_SHIFT) <= end_pfn_map)
change_page_attr_addr(md->virt_addr,
md->num_pages,
PAGE_KERNEL_EXEC_NOCACHE);
}
/* Flush stale TLB entries so the new page attributes take effect. */
__flush_tlb_all();
}
#else
/* Without NX support there is nothing to make executable. */
static inline void __init runtime_code_page_mkexec(void) { }
#endif
/* /*
* This function will switch the EFI runtime services to virtual mode. * This function will switch the EFI runtime services to virtual mode.
* Essentially, look through the EFI memmap and map every region that * Essentially, look through the EFI memmap and map every region that
...@@ -399,9 +427,9 @@ void __init efi_enter_virtual_mode(void) ...@@ -399,9 +427,9 @@ void __init efi_enter_virtual_mode(void)
md = p; md = p;
if (!(md->attribute & EFI_MEMORY_RUNTIME)) if (!(md->attribute & EFI_MEMORY_RUNTIME))
continue; continue;
end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
if ((md->attribute & EFI_MEMORY_WB) && if ((md->attribute & EFI_MEMORY_WB) &&
(((md->phys_addr + (md->num_pages<<EFI_PAGE_SHIFT)) >> ((end >> PAGE_SHIFT) <= end_pfn_map))
PAGE_SHIFT) < end_pfn_map))
md->virt_addr = (unsigned long)__va(md->phys_addr); md->virt_addr = (unsigned long)__va(md->phys_addr);
else else
md->virt_addr = (unsigned long) md->virt_addr = (unsigned long)
...@@ -410,7 +438,6 @@ void __init efi_enter_virtual_mode(void) ...@@ -410,7 +438,6 @@ void __init efi_enter_virtual_mode(void)
if (!md->virt_addr) if (!md->virt_addr)
printk(KERN_ERR PFX "ioremap of 0x%llX failed!\n", printk(KERN_ERR PFX "ioremap of 0x%llX failed!\n",
(unsigned long long)md->phys_addr); (unsigned long long)md->phys_addr);
end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
if ((md->phys_addr <= (unsigned long)efi_phys.systab) && if ((md->phys_addr <= (unsigned long)efi_phys.systab) &&
((unsigned long)efi_phys.systab < end)) ((unsigned long)efi_phys.systab < end))
efi.systab = (efi_system_table_t *)(unsigned long) efi.systab = (efi_system_table_t *)(unsigned long)
...@@ -448,9 +475,7 @@ void __init efi_enter_virtual_mode(void) ...@@ -448,9 +475,7 @@ void __init efi_enter_virtual_mode(void)
efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count; efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
efi.reset_system = virt_efi_reset_system; efi.reset_system = virt_efi_reset_system;
efi.set_virtual_address_map = virt_efi_set_virtual_address_map; efi.set_virtual_address_map = virt_efi_set_virtual_address_map;
#ifdef CONFIG_X86_64
runtime_code_page_mkexec(); runtime_code_page_mkexec();
#endif
} }
/* /*
......
...@@ -33,7 +33,6 @@ ...@@ -33,7 +33,6 @@
#include <asm/e820.h> #include <asm/e820.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/proto.h> #include <asm/proto.h>
#include <asm/efi.h> #include <asm/efi.h>
...@@ -55,7 +54,7 @@ static void __init early_mapping_set_exec(unsigned long start, ...@@ -55,7 +54,7 @@ static void __init early_mapping_set_exec(unsigned long start,
else else
set_pte(kpte, __pte((pte_val(*kpte) | _PAGE_NX) & \ set_pte(kpte, __pte((pte_val(*kpte) | _PAGE_NX) & \
__supported_pte_mask)); __supported_pte_mask));
if (pte_huge(*kpte)) if (level == 4)
start = (start + PMD_SIZE) & PMD_MASK; start = (start + PMD_SIZE) & PMD_MASK;
else else
start = (start + PAGE_SIZE) & PAGE_MASK; start = (start + PAGE_SIZE) & PAGE_MASK;
...@@ -67,6 +66,9 @@ static void __init early_runtime_code_mapping_set_exec(int executable) ...@@ -67,6 +66,9 @@ static void __init early_runtime_code_mapping_set_exec(int executable)
efi_memory_desc_t *md; efi_memory_desc_t *md;
void *p; void *p;
if (!(__supported_pte_mask & _PAGE_NX))
return;
/* Make EFI runtime service code area executable */ /* Make EFI runtime service code area executable */
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p; md = p;
...@@ -116,22 +118,6 @@ void __init efi_reserve_bootmem(void) ...@@ -116,22 +118,6 @@ void __init efi_reserve_bootmem(void)
memmap.nr_map * memmap.desc_size); memmap.nr_map * memmap.desc_size);
} }
/*
 * Old x86-64-only variant (removed by this patch): marks EFI
 * runtime-service code regions executable.  Unlike the replacement,
 * it does not check __supported_pte_mask for _PAGE_NX and does not
 * bound the region against end_pfn_map -- the two bugs this commit
 * fixes.
 */
void __init runtime_code_page_mkexec(void)
{
efi_memory_desc_t *md;
void *p;
/* Make EFI runtime service code area executable */
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
if (md->type == EFI_RUNTIME_SERVICES_CODE)
change_page_attr_addr(md->virt_addr,
md->num_pages,
PAGE_KERNEL_EXEC);
}
/* Flush TLBs so the attribute change is visible. */
__flush_tlb_all();
}
void __iomem * __init efi_ioremap(unsigned long offset, void __iomem * __init efi_ioremap(unsigned long offset,
unsigned long size) unsigned long size)
{ {
...@@ -146,8 +132,8 @@ void __iomem * __init efi_ioremap(unsigned long offset, ...@@ -146,8 +132,8 @@ void __iomem * __init efi_ioremap(unsigned long offset,
return NULL; return NULL;
for (i = 0; i < pages; i++) { for (i = 0; i < pages; i++) {
set_fixmap_nocache(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped, __set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
offset); offset, PAGE_KERNEL_EXEC_NOCACHE);
offset += PAGE_SIZE; offset += PAGE_SIZE;
pages_mapped++; pages_mapped++;
} }
......
...@@ -95,6 +95,5 @@ extern void *efi_ioremap(unsigned long offset, unsigned long size); ...@@ -95,6 +95,5 @@ extern void *efi_ioremap(unsigned long offset, unsigned long size);
extern void efi_reserve_bootmem(void); extern void efi_reserve_bootmem(void);
extern void efi_call_phys_prelog(void); extern void efi_call_phys_prelog(void);
extern void efi_call_phys_epilog(void); extern void efi_call_phys_epilog(void);
extern void runtime_code_page_mkexec(void);
#endif #endif
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册