提交 c26a535b 编写于 作者: C Catalin Marinas

Merge tag 'for-3.20' of http://git.linaro.org/people/ard.biesheuvel/linux-arm into upstream

UEFI updates for arm64

This series consists of a reimplementation of the virtual remapping of
UEFI Runtime Services in a way that is stable across kexec, including
the required preparatory refactoring and other work to set the stage,
and some cleaning up afterwards to remove boot services memory and
identity map handling that has now become redundant.

* tag 'for-3.20' of http://git.linaro.org/people/ard.biesheuvel/linux-arm:
  arm64/efi: remove idmap manipulations from UEFI code
  arm64/efi: remove free_boot_services() and friends
  arm64/efi: move SetVirtualAddressMap() to UEFI stub
  arm64/efi: set EFI_ALLOC_ALIGN to 64 KB
  efi: efistub: allow allocation alignment larger than EFI_PAGE_SIZE
  efi: split off remapping code from efi_config_init()
  arm64/mm: add create_pgd_mapping() to create private page tables
  arm64/mm: add explicit struct_mm argument to __create_mapping()
......@@ -6,29 +6,35 @@
#ifdef CONFIG_EFI
extern void efi_init(void);
extern void efi_idmap_init(void);
extern void efi_virtmap_init(void);
#else
#define efi_init()
#define efi_idmap_init()
#define efi_virtmap_init()
#endif
#define efi_call_virt(f, ...) \
({ \
efi_##f##_t *__f = efi.systab->runtime->f; \
efi_##f##_t *__f; \
efi_status_t __s; \
\
kernel_neon_begin(); \
efi_virtmap_load(); \
__f = efi.systab->runtime->f; \
__s = __f(__VA_ARGS__); \
efi_virtmap_unload(); \
kernel_neon_end(); \
__s; \
})
#define __efi_call_virt(f, ...) \
({ \
efi_##f##_t *__f = efi.systab->runtime->f; \
efi_##f##_t *__f; \
\
kernel_neon_begin(); \
efi_virtmap_load(); \
__f = efi.systab->runtime->f; \
__f(__VA_ARGS__); \
efi_virtmap_unload(); \
kernel_neon_end(); \
})
......@@ -44,4 +50,28 @@ extern void efi_idmap_init(void);
#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
#define EFI_ALLOC_ALIGN SZ_64K
/*
* On ARM systems, virtually remapped UEFI runtime services are set up in three
* distinct stages:
* - The stub retrieves the final version of the memory map from UEFI, populates
* the virt_addr fields and calls the SetVirtualAddressMap() [SVAM] runtime
* service to communicate the new mapping to the firmware (Note that the new
* mapping is not live at this time)
* - During early boot, the page tables are allocated and populated based on the
* virt_addr fields in the memory map, but only if all descriptors with the
* EFI_MEMORY_RUNTIME attribute have a non-zero value for virt_addr. If this
* succeeds, the EFI_VIRTMAP flag is set to indicate that the virtual mappings
* have been installed successfully.
* - During an early initcall(), the UEFI Runtime Services are enabled and the
* EFI_RUNTIME_SERVICES bit set if some conditions are met, i.e., we need a
* non-early mapping of the UEFI system table, and we need to have the virtmap
* installed.
*/
#define EFI_VIRTMAP EFI_ARCH_1
void efi_virtmap_load(void);
void efi_virtmap_unload(void);
#endif /* _ASM_EFI_H */
......@@ -31,7 +31,8 @@ extern void paging_init(void);
extern void setup_mm_for_reboot(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
/* create an identity mapping for memory (or io if map_io is true) */
extern void create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot);
#endif
......@@ -264,6 +264,11 @@ static inline pmd_t pte_pmd(pte_t pte)
return __pmd(pte_val(pte));
}
/*
 * Derive a section (block) mapping protection value from a page-level
 * pgprot_t by clearing PTE_TABLE_BIT, i.e. the bit that would otherwise
 * mark the entry as a next-level table pointer rather than a block entry.
 */
static inline pgprot_t mk_sect_prot(pgprot_t prot)
{
return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
}
/*
* THP definitions.
*/
......
......@@ -11,25 +11,31 @@
*
*/
#include <linux/atomic.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/bootmem.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/preempt.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/cacheflush.h>
#include <asm/efi.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
/* Kernel-side copy of the UEFI memory map description. */
struct efi_memory_map memmap;
/* Runtime services table; set from efi.systab->runtime once it is mapped. */
static efi_runtime_services_t *runtime;
/* Physical address of the UEFI system table (used for ioremap later). */
static u64 efi_system_table;
/* Verbose boot-time debug output flag; init-time only. */
static int uefi_debug __initdata;
......@@ -48,30 +54,33 @@ static int __init is_normal_ram(efi_memory_desc_t *md)
return 0;
}
static void __init efi_setup_idmap(void)
/*
* Translate an EFI virtual address into a physical address: this is necessary,
* as some data members of the EFI system table are virtually remapped after
* SetVirtualAddressMap() has been called.
*/
static phys_addr_t efi_to_phys(unsigned long addr)
{
struct memblock_region *r;
efi_memory_desc_t *md;
u64 paddr, npages, size;
for_each_memblock(memory, r)
create_id_mapping(r->base, r->size, 0);
/* map runtime io spaces */
for_each_efi_memory_desc(&memmap, md) {
if (!(md->attribute & EFI_MEMORY_RUNTIME) || is_normal_ram(md))
if (!(md->attribute & EFI_MEMORY_RUNTIME))
continue;
paddr = md->phys_addr;
npages = md->num_pages;
memrange_efi_to_native(&paddr, &npages);
size = npages << PAGE_SHIFT;
create_id_mapping(paddr, size, 1);
if (md->virt_addr == 0)
/* no virtual mapping has been installed by the stub */
break;
if (md->virt_addr <= addr &&
(addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
return md->phys_addr + addr - md->virt_addr;
}
return addr;
}
static int __init uefi_init(void)
{
efi_char16_t *c16;
void *config_tables;
u64 table_size;
char vendor[100] = "unknown";
int i, retval;
......@@ -99,7 +108,7 @@ static int __init uefi_init(void)
efi.systab->hdr.revision & 0xffff);
/* Show what we know for posterity */
c16 = early_memremap(efi.systab->fw_vendor,
c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
sizeof(vendor));
if (c16) {
for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
......@@ -112,8 +121,14 @@ static int __init uefi_init(void)
efi.systab->hdr.revision >> 16,
efi.systab->hdr.revision & 0xffff, vendor);
retval = efi_config_init(NULL);
table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
config_tables = early_memremap(efi_to_phys(efi.systab->tables),
table_size);
retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
sizeof(efi_config_table_64_t), NULL);
early_memunmap(config_tables, table_size);
out:
early_memunmap(efi.systab, sizeof(efi_system_table_t));
return retval;
......@@ -163,9 +178,7 @@ static __init void reserve_regions(void)
if (is_normal_ram(md))
early_init_dt_add_memory_arch(paddr, size);
if (is_reserve_region(md) ||
md->type == EFI_BOOT_SERVICES_CODE ||
md->type == EFI_BOOT_SERVICES_DATA) {
if (is_reserve_region(md)) {
memblock_reserve(paddr, size);
if (uefi_debug)
pr_cont("*");
......@@ -178,123 +191,6 @@ static __init void reserve_regions(void)
set_bit(EFI_MEMMAP, &efi.flags);
}
/*
 * Release the physical range [start, end) back to the page allocator and
 * return the number of bytes freed. Both bounds are expected to be
 * page-aligned by the caller.
 */
static u64 __init free_one_region(u64 start, u64 end)
{
u64 size = end - start;
if (uefi_debug)
pr_info(" EFI freeing: 0x%012llx-0x%012llx\n", start, end - 1);
free_bootmem_late(start, size);
return size;
}
/*
 * Free the range [start, end) while carving out the pages occupied by the
 * UEFI memory map itself, which must remain intact. Returns the number of
 * bytes actually freed (0 when the range is empty).
 */
static u64 __init free_region(u64 start, u64 end)
{
u64 map_start, map_end, total = 0;
if (end <= start)
return total;
/* Page-aligned extent of the UEFI memory map that we must preserve. */
map_start = (u64)memmap.phys_map;
map_end = PAGE_ALIGN(map_start + (memmap.map_end - memmap.map));
map_start &= PAGE_MASK;
if (start < map_end && end > map_start) {
/* region overlaps UEFI memmap: free only the parts on either side */
if (start < map_start)
total += free_one_region(start, map_start);
if (map_end < end)
total += free_one_region(map_end, end);
} else
total += free_one_region(start, end);
return total;
}
/*
 * Walk the UEFI memory map and return all EFI_BOOT_SERVICES_CODE/DATA
 * regions to the page allocator, clipping each candidate range so that no
 * reserved region (or the memory map itself) is freed, even when the
 * kernel page size is larger than the 4 KB UEFI page size.
 */
static void __init free_boot_services(void)
{
u64 total_freed = 0;
u64 keep_end, free_start, free_end;
efi_memory_desc_t *md;
/*
 * If kernel uses larger pages than UEFI, we have to be careful
 * not to inadvertently free memory we want to keep if there is
 * overlap at the kernel page size alignment. We do not want to
 * free is_reserve_region() memory nor the UEFI memmap itself.
 *
 * The memory map is sorted, so we keep track of the end of
 * any previous region we want to keep, remember any region
 * we want to free and defer freeing it until we encounter
 * the next region we want to keep. This way, before freeing
 * it, we can clip it as needed to avoid freeing memory we
 * want to keep for UEFI.
 */
keep_end = 0;
free_start = 0;
for_each_efi_memory_desc(&memmap, md) {
u64 paddr, npages, size;
if (is_reserve_region(md)) {
/*
 * We don't want to free any memory from this region.
 */
if (free_start) {
/* adjust free_end then free region */
if (free_end > md->phys_addr)
free_end -= PAGE_SIZE;
total_freed += free_region(free_start, free_end);
free_start = 0;
}
keep_end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
continue;
}
if (md->type != EFI_BOOT_SERVICES_CODE &&
md->type != EFI_BOOT_SERVICES_DATA) {
/* no need to free this region */
continue;
}
/*
 * We want to free memory from this region.
 */
paddr = md->phys_addr;
npages = md->num_pages;
memrange_efi_to_native(&paddr, &npages);
size = npages << PAGE_SHIFT;
if (free_start) {
/* adjacent/overlapping: extend the pending range, else flush it */
if (paddr <= free_end)
free_end = paddr + size;
else {
total_freed += free_region(free_start, free_end);
free_start = paddr;
free_end = paddr + size;
}
} else {
free_start = paddr;
free_end = paddr + size;
}
/* clip the start if it overlaps the tail of a kept region */
if (free_start < keep_end) {
free_start += PAGE_SIZE;
if (free_start >= free_end)
free_start = 0;
}
}
/* flush any range still pending after the final descriptor */
if (free_start)
total_freed += free_region(free_start, free_end);
if (total_freed)
pr_info("Freed 0x%llx bytes of EFI boot services memory",
total_freed);
}
void __init efi_init(void)
{
struct efi_fdt_params params;
......@@ -319,61 +215,14 @@ void __init efi_init(void)
reserve_regions();
}
void __init efi_idmap_init(void)
{
if (!efi_enabled(EFI_BOOT))
return;
/* boot time idmap_pg_dir is incomplete, so fill in missing parts */
efi_setup_idmap();
early_memunmap(memmap.map, memmap.map_end - memmap.map);
}
static int __init remap_region(efi_memory_desc_t *md, void **new)
{
u64 paddr, vaddr, npages, size;
paddr = md->phys_addr;
npages = md->num_pages;
memrange_efi_to_native(&paddr, &npages);
size = npages << PAGE_SHIFT;
if (is_normal_ram(md))
vaddr = (__force u64)ioremap_cache(paddr, size);
else
vaddr = (__force u64)ioremap(paddr, size);
if (!vaddr) {
pr_err("Unable to remap 0x%llx pages @ %p\n",
npages, (void *)paddr);
return 0;
}
/* adjust for any rounding when EFI and system pagesize differs */
md->virt_addr = vaddr + (md->phys_addr - paddr);
if (uefi_debug)
pr_info(" EFI remap 0x%012llx => %p\n",
md->phys_addr, (void *)md->virt_addr);
memcpy(*new, md, memmap.desc_size);
*new += memmap.desc_size;
return 1;
}
/*
* Switch UEFI from an identity map to a kernel virtual map
* Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
* non-early mapping of the UEFI system table and virtual mappings for all
* EFI_MEMORY_RUNTIME regions.
*/
static int __init arm64_enter_virtual_mode(void)
static int __init arm64_enable_runtime_services(void)
{
efi_memory_desc_t *md;
phys_addr_t virtmap_phys;
void *virtmap, *virt_md;
efi_status_t status;
u64 mapsize;
int count = 0;
unsigned long flags;
if (!efi_enabled(EFI_BOOT)) {
pr_info("EFI services will not be available.\n");
......@@ -395,81 +244,28 @@ static int __init arm64_enter_virtual_mode(void)
efi.memmap = &memmap;
/* Map the runtime regions */
virtmap = kmalloc(mapsize, GFP_KERNEL);
if (!virtmap) {
pr_err("Failed to allocate EFI virtual memmap\n");
return -1;
}
virtmap_phys = virt_to_phys(virtmap);
virt_md = virtmap;
for_each_efi_memory_desc(&memmap, md) {
if (!(md->attribute & EFI_MEMORY_RUNTIME))
continue;
if (!remap_region(md, &virt_md))
goto err_unmap;
++count;
}
efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table);
efi.systab = (__force void *)ioremap_cache(efi_system_table,
sizeof(efi_system_table_t));
if (!efi.systab) {
/*
* If we have no virtual mapping for the System Table at this
* point, the memory map doesn't cover the physical offset where
* it resides. This means the System Table will be inaccessible
* to Runtime Services themselves once the virtual mapping is
* installed.
*/
pr_err("Failed to remap EFI System Table -- buggy firmware?\n");
goto err_unmap;
pr_err("Failed to remap EFI System Table\n");
return -1;
}
set_bit(EFI_SYSTEM_TABLES, &efi.flags);
local_irq_save(flags);
cpu_switch_mm(idmap_pg_dir, &init_mm);
/* Call SetVirtualAddressMap with the physical address of the map */
runtime = efi.systab->runtime;
efi.set_virtual_address_map = runtime->set_virtual_address_map;
status = efi.set_virtual_address_map(count * memmap.desc_size,
memmap.desc_size,
memmap.desc_version,
(efi_memory_desc_t *)virtmap_phys);
cpu_set_reserved_ttbr0();
flush_tlb_all();
local_irq_restore(flags);
kfree(virtmap);
free_boot_services();
if (status != EFI_SUCCESS) {
pr_err("Failed to set EFI virtual address map! [%lx]\n",
status);
if (!efi_enabled(EFI_VIRTMAP)) {
pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
return -1;
}
/* Set up runtime services function pointers */
runtime = efi.systab->runtime;
efi_native_runtime_setup();
set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
efi.runtime_version = efi.systab->hdr.revision;
return 0;
err_unmap:
/* unmap all mappings that succeeded: there are 'count' of those */
for (virt_md = virtmap; count--; virt_md += memmap.desc_size) {
md = virt_md;
iounmap((__force void __iomem *)md->virt_addr);
}
kfree(virtmap);
return -1;
}
early_initcall(arm64_enter_virtual_mode);
early_initcall(arm64_enable_runtime_services);
static int __init arm64_dmi_init(void)
{
......@@ -484,3 +280,80 @@ static int __init arm64_dmi_init(void)
return 0;
}
core_initcall(arm64_dmi_init);
/* Private page-global directory backing the UEFI runtime mappings. */
static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
/*
 * Minimal mm_struct that owns efi_pgd. The UEFI Runtime Services regions
 * are mapped into it by efi_virtmap_init(), and it is switched in/out
 * around each runtime service call via efi_virtmap_load()/unload().
 */
static struct mm_struct efi_mm = {
.mm_rb = RB_ROOT,
.pgd = efi_pgd,
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
.mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
.page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
INIT_MM_CONTEXT(efi_mm)
};
/*
 * Install @mm's page tables on this CPU. The TLB flush must follow the
 * switch, and AIVIVT icaches additionally need an icache flush since their
 * contents are keyed by virtual address.
 */
static void efi_set_pgd(struct mm_struct *mm)
{
cpu_switch_mm(mm->pgd, mm);
flush_tlb_all();
if (icache_is_aivivt())
__flush_icache_all();
}
/*
 * Switch to the EFI runtime page tables before a runtime service call.
 * Preemption stays disabled until efi_virtmap_unload() so the borrowed
 * address space cannot leak to another task.
 */
void efi_virtmap_load(void)
{
preempt_disable();
efi_set_pgd(&efi_mm);
}
/*
 * Restore the interrupted task's address space after a runtime service
 * call and re-enable preemption (paired with efi_virtmap_load()).
 */
void efi_virtmap_unload(void)
{
efi_set_pgd(current->active_mm);
preempt_enable();
}
/*
 * Populate efi_mm with mappings for every EFI_MEMORY_RUNTIME region, using
 * the virt_addr values the stub recorded in the memory map. If any runtime
 * descriptor lacks a virtual address the virtmap is considered unusable and
 * EFI_VIRTMAP is left clear. On success the early memory map mapping is
 * torn down as it is no longer needed.
 */
void __init efi_virtmap_init(void)
{
efi_memory_desc_t *md;
if (!efi_enabled(EFI_BOOT))
return;
for_each_efi_memory_desc(&memmap, md) {
u64 paddr, npages, size;
pgprot_t prot;
if (!(md->attribute & EFI_MEMORY_RUNTIME))
continue;
/* a zero virt_addr means the stub did not install a mapping */
if (WARN(md->virt_addr == 0,
"UEFI virtual mapping incomplete or missing -- no entry found for 0x%llx\n",
md->phys_addr))
return;
/* widen the range to native page granularity before mapping */
paddr = md->phys_addr;
npages = md->num_pages;
memrange_efi_to_native(&paddr, &npages);
size = npages << PAGE_SHIFT;
pr_info(" EFI remap 0x%016llx => %p\n",
md->phys_addr, (void *)md->virt_addr);
/*
 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
 * executable, everything else can be mapped with the XN bits
 * set.
 */
if (!is_normal_ram(md))
prot = __pgprot(PROT_DEVICE_nGnRE);
else if (md->type == EFI_RUNTIME_SERVICES_CODE)
prot = PAGE_KERNEL_EXEC;
else
prot = PAGE_KERNEL;
create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
}
set_bit(EFI_VIRTMAP, &efi.flags);
early_memunmap(memmap.map, memmap.map_end - memmap.map);
}
......@@ -401,7 +401,7 @@ void __init setup_arch(char **cmdline_p)
paging_init();
request_standard_resources();
efi_idmap_init();
efi_virtmap_init();
early_ioremap_reset();
unflatten_device_tree();
......
......@@ -156,29 +156,19 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
} while (pte++, addr += PAGE_SIZE, addr != end);
}
static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
unsigned long end, phys_addr_t phys,
int map_io)
static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot)
{
pmd_t *pmd;
unsigned long next;
pmdval_t prot_sect;
pgprot_t prot_pte;
if (map_io) {
prot_sect = PROT_SECT_DEVICE_nGnRE;
prot_pte = __pgprot(PROT_DEVICE_nGnRE);
} else {
prot_sect = PROT_SECT_NORMAL_EXEC;
prot_pte = PAGE_KERNEL_EXEC;
}
/*
* Check for initial section mappings in the pgd/pud and remove them.
*/
if (pud_none(*pud) || pud_bad(*pud)) {
pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
pud_populate(&init_mm, pud, pmd);
pud_populate(mm, pud, pmd);
}
pmd = pmd_offset(pud, addr);
......@@ -187,7 +177,8 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
/* try section mapping first */
if (((addr | next | phys) & ~SECTION_MASK) == 0) {
pmd_t old_pmd =*pmd;
set_pmd(pmd, __pmd(phys | prot_sect));
set_pmd(pmd, __pmd(phys |
pgprot_val(mk_sect_prot(prot))));
/*
* Check for previous table entries created during
* boot (__create_page_tables) and flush them.
......@@ -196,22 +187,22 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
flush_tlb_all();
} else {
alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
prot_pte);
prot);
}
phys += next - addr;
} while (pmd++, addr = next, addr != end);
}
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
unsigned long end, phys_addr_t phys,
int map_io)
static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot)
{
pud_t *pud;
unsigned long next;
if (pgd_none(*pgd)) {
pud = early_alloc(PTRS_PER_PUD * sizeof(pud_t));
pgd_populate(&init_mm, pgd, pud);
pgd_populate(mm, pgd, pud);
}
BUG_ON(pgd_bad(*pgd));
......@@ -222,10 +213,11 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
/*
* For 4K granule only, attempt to put down a 1GB block
*/
if (!map_io && (PAGE_SHIFT == 12) &&
if ((PAGE_SHIFT == 12) &&
((addr | next | phys) & ~PUD_MASK) == 0) {
pud_t old_pud = *pud;
set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));
set_pud(pud, __pud(phys |
pgprot_val(mk_sect_prot(prot))));
/*
* If we have an old value for a pud, it will
......@@ -240,7 +232,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
flush_tlb_all();
}
} else {
alloc_init_pmd(pud, addr, next, phys, map_io);
alloc_init_pmd(mm, pud, addr, next, phys, prot);
}
phys += next - addr;
} while (pud++, addr = next, addr != end);
......@@ -250,9 +242,9 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
* Create the page directory entries and any necessary page tables for the
* mapping specified by 'md'.
*/
static void __init __create_mapping(pgd_t *pgd, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
int map_io)
static void __init __create_mapping(struct mm_struct *mm, pgd_t *pgd,
phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot)
{
unsigned long addr, length, end, next;
......@@ -262,7 +254,7 @@ static void __init __create_mapping(pgd_t *pgd, phys_addr_t phys,
end = addr + length;
do {
next = pgd_addr_end(addr, end);
alloc_init_pud(pgd, addr, next, phys, map_io);
alloc_init_pud(mm, pgd, addr, next, phys, prot);
phys += next - addr;
} while (pgd++, addr = next, addr != end);
}
......@@ -275,17 +267,15 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
&phys, virt);
return;
}
__create_mapping(pgd_offset_k(virt & PAGE_MASK), phys, virt, size, 0);
__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
size, PAGE_KERNEL_EXEC);
}
void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io)
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot)
{
if ((addr >> PGDIR_SHIFT) >= ARRAY_SIZE(idmap_pg_dir)) {
pr_warn("BUG: not creating id mapping for %pa\n", &addr);
return;
}
__create_mapping(&idmap_pg_dir[pgd_index(addr)],
addr, addr, size, map_io);
__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot);
}
static void __init map_mem(void)
......
......@@ -293,29 +293,15 @@ static __init int match_config_table(efi_guid_t *guid,
return 0;
}
int __init efi_config_init(efi_config_table_type_t *arch_tables)
int __init efi_config_parse_tables(void *config_tables, int count, int sz,
efi_config_table_type_t *arch_tables)
{
void *config_tables, *tablep;
int i, sz;
if (efi_enabled(EFI_64BIT))
sz = sizeof(efi_config_table_64_t);
else
sz = sizeof(efi_config_table_32_t);
/*
* Let's see what config tables the firmware passed to us.
*/
config_tables = early_memremap(efi.systab->tables,
efi.systab->nr_tables * sz);
if (config_tables == NULL) {
pr_err("Could not map Configuration table!\n");
return -ENOMEM;
}
void *tablep;
int i;
tablep = config_tables;
pr_info("");
for (i = 0; i < efi.systab->nr_tables; i++) {
for (i = 0; i < count; i++) {
efi_guid_t guid;
unsigned long table;
......@@ -328,8 +314,6 @@ int __init efi_config_init(efi_config_table_type_t *arch_tables)
if (table64 >> 32) {
pr_cont("\n");
pr_err("Table located above 4GB, disabling EFI.\n");
early_memunmap(config_tables,
efi.systab->nr_tables * sz);
return -EINVAL;
}
#endif
......@@ -344,13 +328,37 @@ int __init efi_config_init(efi_config_table_type_t *arch_tables)
tablep += sz;
}
pr_cont("\n");
early_memunmap(config_tables, efi.systab->nr_tables * sz);
set_bit(EFI_CONFIG_TABLES, &efi.flags);
return 0;
}
/*
 * Map the firmware-provided configuration table array, hand it to
 * efi_config_parse_tables() for decoding (including any arch-specific
 * tables in @arch_tables), then drop the temporary mapping.
 *
 * Returns 0 on success, -ENOMEM if the table array cannot be mapped, or
 * the error returned by the parser.
 */
int __init efi_config_init(efi_config_table_type_t *arch_tables)
{
	int desc_size = efi_enabled(EFI_64BIT) ? sizeof(efi_config_table_64_t)
					       : sizeof(efi_config_table_32_t);
	void *tbls;
	int status;

	/* Let's see what config tables the firmware passed to us. */
	tbls = early_memremap(efi.systab->tables,
			      efi.systab->nr_tables * desc_size);
	if (tbls == NULL) {
		pr_err("Could not map Configuration table!\n");
		return -ENOMEM;
	}

	status = efi_config_parse_tables(tbls, efi.systab->nr_tables,
					 desc_size, arch_tables);

	early_memunmap(tbls, efi.systab->nr_tables * desc_size);
	return status;
}
#ifdef CONFIG_EFI_VARS_MODULE
static int __init efi_load_efivars(void)
{
......
......@@ -295,3 +295,62 @@ unsigned long __init efi_entry(void *handle, efi_system_table_t *sys_table,
fail:
return EFI_ERROR;
}
/*
* This is the base address at which to start allocating virtual memory ranges
* for UEFI Runtime Services. This is in the low TTBR0 range so that we can use
* any allocation we choose, and eliminate the risk of a conflict after kexec.
* The value chosen is the largest non-zero power of 2 suitable for this purpose
* both on 32-bit and 64-bit ARM CPUs, to maximize the likelihood that it can
* be mapped efficiently.
*/
#define EFI_RT_VIRTUAL_BASE 0x40000000
/*
* efi_get_virtmap() - create a virtual mapping for the EFI memory map
*
* This function populates the virt_addr fields of all memory region descriptors
* in @memory_map whose EFI_MEMORY_RUNTIME attribute is set. Those descriptors
* are also copied to @runtime_map, and their total count is returned in @count.
*/
/*
 * Assign virtual addresses (starting at EFI_RT_VIRTUAL_BASE) to every
 * EFI_MEMORY_RUNTIME descriptor in @memory_map, and copy those descriptors
 * into @runtime_map, incrementing *count for each one. @count must be
 * initialized by the caller.
 */
void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
unsigned long desc_size, efi_memory_desc_t *runtime_map,
int *count)
{
u64 efi_virt_base = EFI_RT_VIRTUAL_BASE;
efi_memory_desc_t *out = runtime_map;
int l;
for (l = 0; l < map_size; l += desc_size) {
/* descriptors are desc_size apart, which may exceed sizeof(*in) */
efi_memory_desc_t *in = (void *)memory_map + l;
u64 paddr, size;
if (!(in->attribute & EFI_MEMORY_RUNTIME))
continue;
/*
 * Make the mapping compatible with 64k pages: this allows
 * a 4k page size kernel to kexec a 64k page size kernel and
 * vice versa.
 */
paddr = round_down(in->phys_addr, SZ_64K);
size = round_up(in->num_pages * EFI_PAGE_SIZE +
in->phys_addr - paddr, SZ_64K);
/*
 * Avoid wasting memory on PTEs by choosing a virtual base that
 * is compatible with section mappings if this region has the
 * appropriate size and physical alignment. (Sections are 2 MB
 * on 4k granule kernels)
 */
if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
efi_virt_base = round_up(efi_virt_base, SZ_2M);
/* keep the physical offset within the 64 KB-rounded window */
in->virt_addr = efi_virt_base + in->phys_addr - paddr;
efi_virt_base += size;
memcpy(out, in, desc_size);
out = (void *)out + desc_size;
++*count;
}
}
......@@ -32,6 +32,15 @@
static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
/*
* Allow the platform to override the allocation granularity: this allows
* systems that have the capability to run with a larger page size to deal
* with the allocations for initrd and fdt more efficiently.
*/
#ifndef EFI_ALLOC_ALIGN
#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
#endif
struct file_info {
efi_file_handle_t *handle;
u64 size;
......@@ -150,10 +159,10 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
* a specific address. We are doing page-based allocations,
* so we must be aligned to a page.
*/
if (align < EFI_PAGE_SIZE)
align = EFI_PAGE_SIZE;
if (align < EFI_ALLOC_ALIGN)
align = EFI_ALLOC_ALIGN;
nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
again:
for (i = 0; i < map_size / desc_size; i++) {
efi_memory_desc_t *desc;
......@@ -235,10 +244,10 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
* a specific address. We are doing page-based allocations,
* so we must be aligned to a page.
*/
if (align < EFI_PAGE_SIZE)
align = EFI_PAGE_SIZE;
if (align < EFI_ALLOC_ALIGN)
align = EFI_ALLOC_ALIGN;
nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
for (i = 0; i < map_size / desc_size; i++) {
efi_memory_desc_t *desc;
unsigned long m = (unsigned long)map;
......@@ -292,7 +301,7 @@ void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
if (!size)
return;
nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
efi_call_early(free_pages, addr, nr_pages);
}
......@@ -561,7 +570,7 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
* to the preferred address. If that fails, allocate as low
* as possible while respecting the required alignment.
*/
nr_pages = round_up(alloc_size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
status = efi_call_early(allocate_pages,
EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
nr_pages, &efi_addr);
......
......@@ -39,4 +39,8 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
void *get_fdt(efi_system_table_t *sys_table);
void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
unsigned long desc_size, efi_memory_desc_t *runtime_map,
int *count);
#endif
......@@ -14,6 +14,8 @@
#include <linux/libfdt.h>
#include <asm/efi.h>
#include "efistub.h"
efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
unsigned long orig_fdt_size,
void *fdt, int new_fdt_size, char *cmdline_ptr,
......@@ -193,9 +195,26 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
unsigned long map_size, desc_size;
u32 desc_ver;
unsigned long mmap_key;
efi_memory_desc_t *memory_map;
efi_memory_desc_t *memory_map, *runtime_map;
unsigned long new_fdt_size;
efi_status_t status;
int runtime_entry_count = 0;
/*
* Get a copy of the current memory map that we will use to prepare
* the input for SetVirtualAddressMap(). We don't have to worry about
* subsequent allocations adding entries, since they could not affect
* the number of EFI_MEMORY_RUNTIME regions.
*/
status = efi_get_memory_map(sys_table, &runtime_map, &map_size,
&desc_size, &desc_ver, &mmap_key);
if (status != EFI_SUCCESS) {
pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n");
return status;
}
pr_efi(sys_table,
"Exiting boot services and installing virtual address map...\n");
/*
* Estimate size of new FDT, and allocate memory for it. We
......@@ -248,12 +267,48 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
}
}
/*
* Update the memory map with virtual addresses. The function will also
* populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
* entries so that we can pass it straight into SetVirtualAddressMap()
*/
efi_get_virtmap(memory_map, map_size, desc_size, runtime_map,
&runtime_entry_count);
/* Now we are ready to exit_boot_services.*/
status = sys_table->boottime->exit_boot_services(handle, mmap_key);
if (status == EFI_SUCCESS) {
efi_set_virtual_address_map_t *svam;
if (status == EFI_SUCCESS)
return status;
/* Install the new virtual address map */
svam = sys_table->runtime->set_virtual_address_map;
status = svam(runtime_entry_count * desc_size, desc_size,
desc_ver, runtime_map);
/*
* We are beyond the point of no return here, so if the call to
* SetVirtualAddressMap() failed, we need to signal that to the
* incoming kernel but proceed normally otherwise.
*/
if (status != EFI_SUCCESS) {
int l;
/*
* Set the virtual address field of all
* EFI_MEMORY_RUNTIME entries to 0. This will signal
* the incoming kernel that no virtual translation has
* been installed.
*/
for (l = 0; l < map_size; l += desc_size) {
efi_memory_desc_t *p = (void *)memory_map + l;
if (p->attribute & EFI_MEMORY_RUNTIME)
p->virt_addr = 0;
}
}
return EFI_SUCCESS;
}
pr_efi_err(sys_table, "Exit boot services failed.\n");
......@@ -264,6 +319,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
efi_free(sys_table, new_fdt_size, *new_fdt_addr);
fail:
sys_table->boottime->free_pool(runtime_map);
return EFI_LOAD_ERROR;
}
......
......@@ -875,6 +875,8 @@ static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned lon
#endif
extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
extern int efi_config_init(efi_config_table_type_t *arch_tables);
extern int efi_config_parse_tables(void *config_tables, int count, int sz,
efi_config_table_type_t *arch_tables);
extern u64 efi_get_iobase (void);
extern u32 efi_mem_type (unsigned long phys_addr);
extern u64 efi_mem_attributes (unsigned long phys_addr);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册