Commit 2bf3bbfb authored by Liu Shixin, committed by Zheng Zengkai

Revert "arm64: remove page granularity limitation from KFENCE"

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4XWBS

--------------------------------

This reverts commit d3d0ca13 ("arm64: remove page granularity limitation from KFENCE").

We found that this patch may lead to a TLB conflict abort. This appears to
result from the live change between block and table mappings. The problem may
be related to the Break-Before-Make sequence rule, but the exact mechanism is
not yet clear (see the sketch below the commit header).
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent b3029c71
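
To illustrate why Break-Before-Make (BBM) is the suspect: the reverted
split_pmd_page() (first hunk below) publishes the new table entry with
pmd_populate_kernel() while the old block entry may still be live in TLBs,
and only flushes afterwards. Under the Armv8 BBM rule, the old entry must be
invalidated and its TLB entries flushed before the replacement becomes
visible. A minimal sketch of a BBM-compliant split follows; the helper name
is hypothetical and it assumes pte is a fully populated PTE table covering
[addr, addr + PMD_SIZE):

static void split_pmd_page_bbm(pmd_t *pmdp, pte_t *pte, unsigned long addr)
{
	/* Break: invalidate the live block entry first. */
	pmd_clear(pmdp);

	/* Flush any stale translations of the old block entry. */
	flush_tlb_kernel_range(addr, addr + PMD_SIZE);

	/* Make: only now publish the new table entry. */
	pmd_populate_kernel(&init_mm, pmdp, pte);
}

The cost is that the range is unmapped between the break and the make; on
the linear map that window can cover memory other CPUs are actively using,
which is why this revert returns to mapping the linear map at page
granularity up front instead of splitting it at runtime.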
--- a/arch/arm64/include/asm/kfence.h
+++ b/arch/arm64/include/asm/kfence.h
@@ -8,77 +8,9 @@
 #ifndef __ASM_KFENCE_H
 #define __ASM_KFENCE_H
 
-#include <linux/kfence.h>
-
 #include <asm/cacheflush.h>
-#include <asm/pgalloc.h>
-
-static inline int split_pud_page(pud_t *pud, unsigned long addr)
-{
-	int i;
-	pmd_t *pmd = pmd_alloc_one(&init_mm, addr);
-	unsigned long pfn = PFN_DOWN(__pa(addr));
-
-	if (!pmd)
-		return -ENOMEM;
-
-	for (i = 0; i < PTRS_PER_PMD; i++)
-		set_pmd(pmd + i, pmd_mkhuge(pfn_pmd(pfn + i * PTRS_PER_PTE, PAGE_KERNEL)));
-
-	smp_wmb(); /* See comment in __pte_alloc */
-	pud_populate(&init_mm, pud, pmd);
-
-	flush_tlb_kernel_range(addr, addr + PUD_SIZE);
-	return 0;
-}
-
-static inline int split_pmd_page(pmd_t *pmd, unsigned long addr)
-{
-	int i;
-	pte_t *pte = pte_alloc_one_kernel(&init_mm);
-	unsigned long pfn = PFN_DOWN(__pa(addr));
-
-	if (!pte)
-		return -ENOMEM;
-
-	for (i = 0; i < PTRS_PER_PTE; i++)
-		set_pte(pte + i, pfn_pte(pfn + i, PAGE_KERNEL));
-
-	smp_wmb(); /* See comment in __pte_alloc */
-	pmd_populate_kernel(&init_mm, pmd, pte);
-
-	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
-	return 0;
-}
-
-static inline bool arch_kfence_init_pool(void)
-{
-	unsigned long addr;
-	pgd_t *pgd;
-	p4d_t *p4d;
-	pud_t *pud;
-	pmd_t *pmd;
-
-	for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
-	     addr += PAGE_SIZE) {
-		pgd = pgd_offset(&init_mm, addr);
-		if (pgd_leaf(*pgd))
-			return false;
-		p4d = p4d_offset(pgd, addr);
-		if (p4d_leaf(*p4d))
-			return false;
-		pud = pud_offset(p4d, addr);
-		if (pud_leaf(*pud)) {
-			if (split_pud_page(pud, addr & PUD_MASK))
-				return false;
-		}
-		pmd = pmd_offset(pud, addr);
-		if (pmd_leaf(*pmd)) {
-			if (split_pmd_page(pmd, addr & PMD_MASK))
-				return false;
-		}
-	}
-	return true;
-}
+static inline bool arch_kfence_init_pool(void) { return true; }
 
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
 {
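
For context, the kfence_protect_page() helper whose signature appears above
is unchanged by this revert; on arm64 it simply flips the valid bit of a
single PTE via set_memory_valid(), which is why the mmu.c comment below
insists on a page-granular linear map. Its body, as in mainline arm64,
shown here for reference only:

static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	/* Mark the one page backing addr as (in)valid in the linear map. */
	set_memory_valid(addr, 1, !protect);

	return true;
}

set_memory_valid() only operates on page mappings, so a block-mapped pool
region could not be protected or unprotected one page at a time.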
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -477,7 +477,8 @@ static void __init map_mem(pgd_t *pgdp)
 	int flags = 0, eflags = 0;
 	u64 i;
 
-	if (rodata_full || debug_pagealloc_enabled())
+	if (rodata_full || debug_pagealloc_enabled() ||
+	    IS_ENABLED(CONFIG_KFENCE))
 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	/*
@@ -1486,7 +1487,8 @@ int arch_add_memory(int nid, u64 start, u64 size,
 	 * KFENCE requires linear map to be mapped at page granularity, so that
 	 * it is possible to protect/unprotect single pages in the KFENCE pool.
 	 */
-	if (rodata_full || debug_pagealloc_enabled())
+	if (rodata_full || debug_pagealloc_enabled() ||
+	    IS_ENABLED(CONFIG_KFENCE))
 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),