提交 e8adc240 编写于 作者: S Sean Christopherson 提交者: Jialin Zhang

x86/kasan: Add helpers to align shadow addresses up and down

mainline inclusion
from mainline-v6.2-rc1
commit bde258d9
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I6C6UC
CVE: CVE-2023-0597

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=bde258d97409f2a45243cb393a55ea9ecfc7aba5

--------------------------------

Add helpers to dedup code for aligning shadow address up/down to page
boundaries when translating an address to its shadow.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Link: https://lkml.kernel.org/r/20221110203504.1985010-5-seanjc@google.com
Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
Reviewed-by: Chen Wandun <chenwandun@huawei.com>
Reviewed-by: Wang Weiyang <wangweiyang2@huawei.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
上级 e7b3ac69
......@@ -318,22 +318,33 @@ void __init kasan_early_init(void)
kasan_map_early_shadow(init_top_pgt);
}
/*
 * Translate a virtual address to its KASAN shadow address, rounded down
 * to the start of the containing shadow page.
 */
static unsigned long kasan_mem_to_shadow_align_down(unsigned long va)
{
	void *shadow = kasan_mem_to_shadow((void *)va);

	return round_down((unsigned long)shadow, PAGE_SIZE);
}
/*
 * Translate a virtual address to its KASAN shadow address, rounded up
 * to the next shadow page boundary.
 */
static unsigned long kasan_mem_to_shadow_align_up(unsigned long va)
{
	void *shadow = kasan_mem_to_shadow((void *)va);

	return round_up((unsigned long)shadow, PAGE_SIZE);
}
/*
 * Populate KASAN shadow memory covering the virtual range [va, va + size),
 * allocating the shadow pages on NUMA node @nid.
 *
 * The shadow start is aligned down and the shadow end aligned up so the
 * populated region covers whole shadow pages.
 *
 * NOTE(review): the diff flattening had left the pre-commit open-coded
 * round_down/round_up computations in place as dead stores immediately
 * before the helper calls that replace them; the redundant assignments
 * are removed here, matching the post-commit mainline code.
 */
void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid)
{
	unsigned long shadow_start, shadow_end;

	shadow_start = kasan_mem_to_shadow_align_down((unsigned long)va);
	shadow_end = kasan_mem_to_shadow_align_up((unsigned long)va + size);
	kasan_populate_shadow(shadow_start, shadow_end, nid);
}
void __init kasan_init(void)
{
unsigned long shadow_cea_begin, shadow_cea_end;
int i;
void *shadow_cea_begin, *shadow_cea_end;
memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
......@@ -374,16 +385,9 @@ void __init kasan_init(void)
map_range(&pfn_mapped[i]);
}
shadow_cea_begin = (void *)CPU_ENTRY_AREA_BASE;
shadow_cea_begin = kasan_mem_to_shadow(shadow_cea_begin);
shadow_cea_begin = (void *)round_down(
(unsigned long)shadow_cea_begin, PAGE_SIZE);
shadow_cea_end = (void *)(CPU_ENTRY_AREA_BASE +
CPU_ENTRY_AREA_MAP_SIZE);
shadow_cea_end = kasan_mem_to_shadow(shadow_cea_end);
shadow_cea_end = (void *)round_up(
(unsigned long)shadow_cea_end, PAGE_SIZE);
shadow_cea_begin = kasan_mem_to_shadow_align_down(CPU_ENTRY_AREA_BASE);
shadow_cea_end = kasan_mem_to_shadow_align_up(CPU_ENTRY_AREA_BASE +
CPU_ENTRY_AREA_MAP_SIZE);
kasan_populate_early_shadow(
kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
......@@ -405,9 +409,9 @@ void __init kasan_init(void)
kasan_populate_early_shadow(
kasan_mem_to_shadow((void *)VMALLOC_END + 1),
shadow_cea_begin);
(void *)shadow_cea_begin);
kasan_populate_early_shadow(shadow_cea_end,
kasan_populate_early_shadow((void *)shadow_cea_end,
kasan_mem_to_shadow((void *)__START_KERNEL_map));
kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册