Commit 83863f25 authored by Laura Abbott, committed by Catalin Marinas

arm64: Add support for ARCH_SUPPORTS_DEBUG_PAGEALLOC

ARCH_SUPPORTS_DEBUG_PAGEALLOC provides a hook to map and unmap
pages for debugging purposes. This requires memory be mapped
with PAGE_SIZE mappings since breaking down larger mappings
at runtime will lead to TLB conflicts. Check if debug_pagealloc
is enabled at runtime and if so, map everything with PAGE_SIZE
pages. Implement the functions to actually map/unmap the
pages at runtime.
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Laura Abbott <labbott@fedoraproject.org>
[catalin.marinas@arm.com: static annotation block_mappings_allowed() and #ifdef]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Parent 132233a7
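For context, the __kernel_map_pages() implementation added below is the arch half of a generic hook that the page allocator calls on every allocation and free. The following is a rough sketch, from memory, of the core-kernel wrapper (include/linux/mm.h around this kernel version); it is not part of this patch and details may differ:

static inline void kernel_map_pages(struct page *page, int numpages,
                                    int enable)
{
        /*
         * Only act when CONFIG_DEBUG_PAGEALLOC is built in and the
         * feature was enabled on the command line; otherwise the
         * linear map is left untouched.
         */
        if (!debug_pagealloc_enabled())
                return;

        /* enable = 1 on allocation (map), enable = 0 on free (unmap). */
        __kernel_map_pages(page, numpages, enable);
}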
arch/arm64/Kconfig
@@ -537,6 +537,9 @@ config HOTPLUG_CPU
 source kernel/Kconfig.preempt
 source kernel/Kconfig.hz
 
+config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+        def_bool y
+
 config ARCH_HAS_HOLES_MEMORYMODEL
         def_bool y if SPARSEMEM
arch/arm64/mm/mmu.c
@@ -149,6 +149,26 @@ static void split_pud(pud_t *old_pud, pmd_t *pmd)
         } while (pmd++, i++, i < PTRS_PER_PMD);
 }
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
+{
+        /*
+         * If debug_page_alloc is enabled we must map the linear map
+         * using pages. However, other mappings created by
+         * create_mapping_noalloc must use sections in some cases. Allow
+         * sections to be used in those cases, where no pgtable_alloc
+         * function is provided.
+         */
+        return !pgtable_alloc || !debug_pagealloc_enabled();
+}
+#else
+static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
+{
+        return true;
+}
+#endif
+
 static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
                                   phys_addr_t phys, pgprot_t prot,
                                   phys_addr_t (*pgtable_alloc)(void))
@@ -181,7 +201,8 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
         do {
                 next = pmd_addr_end(addr, end);
                 /* try section mapping first */
-                if (((addr | next | phys) & ~SECTION_MASK) == 0) {
+                if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
+                      block_mappings_allowed(pgtable_alloc)) {
                         pmd_t old_pmd =*pmd;
                         set_pmd(pmd, __pmd(phys |
                                         pgprot_val(mk_sect_prot(prot))));
@@ -241,7 +262,8 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
                 /*
                  * For 4K granule only, attempt to put down a 1GB block
                  */
-                if (use_1G_block(addr, next, phys)) {
+                if (use_1G_block(addr, next, phys) &&
+                    block_mappings_allowed(pgtable_alloc)) {
                         pud_t old_pud = *pud;
                         set_pud(pud, __pud(phys |
                                         pgprot_val(mk_sect_prot(prot))));
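The block_mappings_allowed() helper above keys off debug_pagealloc_enabled(). As a rough, from-memory sketch (not part of this diff), that generic predicate simply reads a flag which is set only when the kernel is built with CONFIG_DEBUG_PAGEALLOC and booted with debug_pagealloc=on, which is why the linear map falls back to PAGE_SIZE mappings only in that configuration:

/* Approximate shape of the generic predicate in include/linux/mm.h. */
extern bool _debug_pagealloc_enabled;

static inline bool debug_pagealloc_enabled(void)
{
        /* Set during early boot by the "debug_pagealloc=on" parameter. */
        return _debug_pagealloc_enabled;
}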
arch/arm64/mm/pageattr.c
@@ -37,14 +37,31 @@ static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
         return 0;
 }
 
+/*
+ * This function assumes that the range is mapped with PAGE_SIZE pages.
+ */
+static int __change_memory_common(unsigned long start, unsigned long size,
+                                pgprot_t set_mask, pgprot_t clear_mask)
+{
+        struct page_change_data data;
+        int ret;
+
+        data.set_mask = set_mask;
+        data.clear_mask = clear_mask;
+
+        ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+                                        &data);
+
+        flush_tlb_kernel_range(start, start + size);
+        return ret;
+}
+
 static int change_memory_common(unsigned long addr, int numpages,
                                 pgprot_t set_mask, pgprot_t clear_mask)
 {
         unsigned long start = addr;
         unsigned long size = PAGE_SIZE*numpages;
         unsigned long end = start + size;
-        int ret;
-        struct page_change_data data;
         struct vm_struct *area;
 
         if (!PAGE_ALIGNED(addr)) {
@@ -75,14 +92,7 @@ static int change_memory_common(unsigned long addr, int numpages,
         if (!numpages)
                 return 0;
 
-        data.set_mask = set_mask;
-        data.clear_mask = clear_mask;
-
-        ret = apply_to_page_range(&init_mm, start, size, change_page_range,
-                                        &data);
-
-        flush_tlb_kernel_range(start, end);
-        return ret;
+        return __change_memory_common(start, size, set_mask, clear_mask);
 }
 
 int set_memory_ro(unsigned long addr, int numpages)
@@ -114,3 +124,19 @@ int set_memory_x(unsigned long addr, int numpages)
                                         __pgprot(PTE_PXN));
 }
 EXPORT_SYMBOL_GPL(set_memory_x);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+        unsigned long addr = (unsigned long) page_address(page);
+
+        if (enable)
+                __change_memory_common(addr, PAGE_SIZE * numpages,
+                                        __pgprot(PTE_VALID),
+                                        __pgprot(0));
+        else
+                __change_memory_common(addr, PAGE_SIZE * numpages,
+                                        __pgprot(0),
+                                        __pgprot(PTE_VALID));
+}
+#endif
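To illustrate what the new __kernel_map_pages() buys (a hypothetical example, not from the commit): once a page is freed, its linear-map PTE loses PTE_VALID, so a stale access through the linear map faults immediately instead of silently corrupting memory.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical demonstration; the function name is illustrative only. */
static void debug_pagealloc_demo(void)
{
        struct page *page = alloc_page(GFP_KERNEL);
        void *va;

        if (!page)
                return;
        va = page_address(page);

        memset(va, 0xaa, PAGE_SIZE);    /* fine: the page is mapped */
        __free_page(page);              /* __kernel_map_pages() clears PTE_VALID */

        /*
         * memset(va, 0xbb, PAGE_SIZE); would now take a kernel page
         * fault, because the linear-map entry is no longer valid.
         */
}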