Commit 42d1a731 authored by Will Deacon

Merge branch 'aarch64/for-next/debug-virtual' into aarch64/for-next/core

Merge core DEBUG_VIRTUAL changes from Laura Abbott. The upcoming arm and
arm64 DEBUG_VIRTUAL support depends on these.

* aarch64/for-next/debug-virtual:
  drivers: firmware: psci: Use __pa_symbol for kernel symbol
  mm/usercopy: Switch to using lm_alias
  mm/kasan: Switch to using __pa_symbol and lm_alias
  kexec: Switch to __pa_symbol
  mm: Introduce lm_alias
  mm/cma: Cleanup highmem check
  lib/Kconfig.debug: Add ARCH_HAS_DEBUG_VIRTUAL
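The whole series enforces one rule: __pa()/virt_to_phys() are only defined for linear-map (direct-mapped) addresses, kernel-image symbols (such as cpu_resume or vmcoreinfo_note below) must be translated with __pa_symbol(), and the new lm_alias() returns the linear-map alias of an image symbol. A minimal userspace sketch of that rule follows; the layout constants and function bodies are invented for the demo, and only the macro names come from the kernel.

/* Illustrative userspace model, not kernel code: only the macro names
 * below exist in the kernel; the constants and bodies are invented. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET  0xffff800000000000UL /* start of the linear map (model) */
#define KIMAGE_VADDR 0xffffffff80000000UL /* start of the kernel image (model) */
#define PHYS_BASE    0x40000000UL         /* physical load address (model) */

/* __pa(): linear-map addresses only; the assert models the DEBUG_VIRTUAL check. */
static uint64_t __pa(uint64_t va)
{
	assert(va >= PAGE_OFFSET && va < KIMAGE_VADDR);
	return va - PAGE_OFFSET;
}

/* __pa_symbol(): for symbols placed in the kernel image. */
static uint64_t __pa_symbol(uint64_t va)
{
	return va - KIMAGE_VADDR + PHYS_BASE;
}

/* lm_alias(): __va(__pa_symbol(x)), the linear-map alias of an image symbol. */
static uint64_t lm_alias(uint64_t va)
{
	return __pa_symbol(va) + PAGE_OFFSET; /* __va() adds PAGE_OFFSET in the model */
}

int main(void)
{
	uint64_t sym = KIMAGE_VADDR + 0x1234; /* stands in for &cpu_resume */

	printf("__pa_symbol(sym) = %#llx\n", (unsigned long long)__pa_symbol(sym));
	printf("lm_alias(sym)    = %#llx\n", (unsigned long long)lm_alias(sym));
	/* __pa(sym) would trip the assert: sym is not a linear-map address. */
	return 0;
}

With CONFIG_DEBUG_VIRTUAL enabled, the kernel's __pa() performs a bounds check much like the assert above (VIRTUAL_BUG_ON() on x86), which is what the call-site conversions in the hunks below keep quiet.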
arch/x86/Kconfig
@@ -46,6 +46,7 @@ config X86
 	select ARCH_CLOCKSOURCE_DATA
 	select ARCH_DISCARD_MEMBLOCK
 	select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
+	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FAST_MULTIPLIER
drivers/firmware/psci.c
@@ -383,7 +383,7 @@ static int psci_suspend_finisher(unsigned long index)
 	u32 *state = __this_cpu_read(psci_power_state);
 
 	return psci_ops.cpu_suspend(state[index - 1],
-				    virt_to_phys(cpu_resume));
+				    __pa_symbol(cpu_resume));
 }
 
 int psci_cpu_suspend_enter(unsigned long index)
include/linux/mm.h
@@ -76,6 +76,10 @@ extern int mmap_rnd_compat_bits __read_mostly;
 #define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
 #endif
 
+#ifndef lm_alias
+#define lm_alias(x)	__va(__pa_symbol(x))
+#endif
+
 /*
  * To prevent common memory management code establishing
  * a zero page mapping on a read fault.
kernel/kexec_core.c
@@ -1399,7 +1399,7 @@ void __weak arch_crash_save_vmcoreinfo(void)
 
 phys_addr_t __weak paddr_vmcoreinfo_note(void)
 {
-	return __pa((unsigned long)(char *)&vmcoreinfo_note);
+	return __pa_symbol((unsigned long)(char *)&vmcoreinfo_note);
 }
 
 static int __init crash_save_vmcoreinfo_init(void)
lib/Kconfig.debug
@@ -622,9 +622,12 @@ config DEBUG_VM_PGFLAGS
 
 	  If unsure, say N.
 
+config ARCH_HAS_DEBUG_VIRTUAL
+	bool
+
 config DEBUG_VIRTUAL
 	bool "Debug VM translations"
-	depends on DEBUG_KERNEL && X86
+	depends on DEBUG_KERNEL && ARCH_HAS_DEBUG_VIRTUAL
 	help
 	  Enable some costly sanity checks in virtual to page code. This can
 	  catch mistakes with virt_to_page() and friends.
mm/cma.c
@@ -235,18 +235,13 @@ int __init cma_declare_contiguous(phys_addr_t base,
 	phys_addr_t highmem_start;
 	int ret = 0;
 
-#ifdef CONFIG_X86
 	/*
-	 * high_memory isn't direct mapped memory so retrieving its physical
-	 * address isn't appropriate. But it would be useful to check the
-	 * physical address of the highmem boundary so it's justifiable to get
-	 * the physical address from it. On x86 there is a validation check for
-	 * this case, so the following workaround is needed to avoid it.
+	 * We can't use __pa(high_memory) directly, since high_memory
+	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
+	 * complain. Find the boundary by adding one to the last valid
+	 * address.
 	 */
-	highmem_start = __pa_nodebug(high_memory);
-#else
-	highmem_start = __pa(high_memory);
-#endif
+	highmem_start = __pa(high_memory - 1) + 1;
 
 	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
 		__func__, &size, &base, &limit, &alignment);
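A worked example of the boundary trick in the new comment (layout constants invented for the demo): high_memory points one byte past the last direct-mapped byte, so it is itself not a valid linear-map address, but translating the last valid byte and adding one back yields the same physical boundary.

/* Userspace model, not kernel code: constants are invented for the demo. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET 0xffff800000000000UL /* linear map start (model) */
#define MEM_SIZE    0x40000000UL         /* 1 GiB of lowmem (model) */

/* __pa() with a DEBUG_VIRTUAL-style bounds check. */
static uint64_t __pa(uint64_t va)
{
	assert(va >= PAGE_OFFSET && va < PAGE_OFFSET + MEM_SIZE);
	return va - PAGE_OFFSET;
}

int main(void)
{
	uint64_t high_memory = PAGE_OFFSET + MEM_SIZE; /* one past the end */
	uint64_t highmem_start = __pa(high_memory - 1) + 1;

	printf("highmem_start = %#llx\n", (unsigned long long)highmem_start);
	/* __pa(high_memory) would fail the assert above. */
	return 0;
}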
mm/kasan/kasan_init.c
@@ -15,6 +15,7 @@
 #include <linux/kasan.h>
 #include <linux/kernel.h>
 #include <linux/memblock.h>
+#include <linux/mm.h>
 #include <linux/pfn.h>
 
 #include <asm/page.h>
@@ -49,7 +50,7 @@ static void __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
 	pte_t *pte = pte_offset_kernel(pmd, addr);
 	pte_t zero_pte;
 
-	zero_pte = pfn_pte(PFN_DOWN(__pa(kasan_zero_page)), PAGE_KERNEL);
+	zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_zero_page)), PAGE_KERNEL);
 	zero_pte = pte_wrprotect(zero_pte);
 
 	while (addr + PAGE_SIZE <= end) {
@@ -69,7 +70,7 @@ static void __init zero_pmd_populate(pud_t *pud, unsigned long addr,
 		next = pmd_addr_end(addr, end);
 
 		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
-			pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
 			continue;
 		}
@@ -92,9 +93,9 @@ static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
 		if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
 			pmd_t *pmd;
 
-			pud_populate(&init_mm, pud, kasan_zero_pmd);
+			pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
 			pmd = pmd_offset(pud, addr);
-			pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
 			continue;
 		}
@@ -135,11 +136,11 @@ void __init kasan_populate_zero_shadow(const void *shadow_start,
 			 * puds,pmds, so pgd_populate(), pud_populate()
 			 * is noops.
 			 */
-			pgd_populate(&init_mm, pgd, kasan_zero_pud);
+			pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_pud));
 			pud = pud_offset(pgd, addr);
-			pud_populate(&init_mm, pud, kasan_zero_pmd);
+			pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
 			pmd = pmd_offset(pud, addr);
-			pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
 			continue;
 		}
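Why lm_alias() is needed in these hunks: the populate helpers store __pa() of the supplied table pointer, and under DEBUG_VIRTUAL __pa() only accepts linear-map addresses, while kasan_zero_pte and friends are image symbols. A self-contained model of the conversion chain (constants invented for the demo):

/* Userspace model, not kernel code. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET  0xffff800000000000UL /* linear map start (model) */
#define KIMAGE_VADDR 0xffffffff80000000UL /* image mapping start (model) */
#define PHYS_BASE    0x40000000UL         /* image load address (model) */

/* __pa(): linear-map only, as DEBUG_VIRTUAL insists. */
static uint64_t __pa(uint64_t va)
{
	assert(va >= PAGE_OFFSET && va < KIMAGE_VADDR);
	return va - PAGE_OFFSET;
}

/* lm_alias(): __va(__pa_symbol(x)) in the model. */
static uint64_t lm_alias(uint64_t image_va)
{
	return (image_va - KIMAGE_VADDR + PHYS_BASE) + PAGE_OFFSET;
}

int main(void)
{
	uint64_t kasan_zero_pte = KIMAGE_VADDR + 0x2000; /* image symbol (model) */

	/* pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte)) boils
	 * down to storing __pa() of the pointer into the PMD entry: */
	uint64_t pmd_entry = __pa(lm_alias(kasan_zero_pte));
	printf("PMD entry phys = %#llx\n", (unsigned long long)pmd_entry);
	/* __pa(kasan_zero_pte) directly would fail the assert. */
	return 0;
}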
mm/usercopy.c
@@ -108,13 +108,13 @@ static inline const char *check_kernel_text_object(const void *ptr,
 	 * __pa() is not just the reverse of __va(). This can be detected
 	 * and checked:
 	 */
-	textlow_linear = (unsigned long)__va(__pa(textlow));
+	textlow_linear = (unsigned long)lm_alias(textlow);
 	/* No different mapping: we're done. */
 	if (textlow_linear == textlow)
 		return NULL;
 
 	/* Check the secondary mapping... */
-	texthigh_linear = (unsigned long)__va(__pa(texthigh));
+	texthigh_linear = (unsigned long)lm_alias(texthigh);
 
 	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
 		return "<linear kernel text>";
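This check relies on lm_alias() giving the linear-map alias of a text address: when the kernel text is mapped twice, the alias differs from the original and the linear-map copy of the text must be rejected as well. A tiny model (layout constants invented for the demo):

/* Userspace model, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET  0xffff800000000000UL /* linear map start (model) */
#define KIMAGE_VADDR 0xffffffff80000000UL /* image mapping start (model) */
#define PHYS_BASE    0x40000000UL         /* image load address (model) */

/* lm_alias(): __va(__pa_symbol(x)) in the model. */
static uint64_t lm_alias(uint64_t image_va)
{
	return (image_va - KIMAGE_VADDR + PHYS_BASE) + PAGE_OFFSET;
}

int main(void)
{
	uint64_t textlow = KIMAGE_VADDR + 0x10000; /* stands in for _stext */
	uint64_t textlow_linear = lm_alias(textlow);

	if (textlow_linear == textlow)
		printf("single mapping: nothing more to check\n");
	else
		printf("secondary text mapping at %#llx must be checked too\n",
		       (unsigned long long)textlow_linear);
	return 0;
}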