From 716653dde5c15087a1973541d518df0b07aee01a Mon Sep 17 00:00:00 2001 From: Cheng Jian Date: Mon, 28 Jan 2019 10:09:09 +0800 Subject: [PATCH] livepatch/arm64: support livepatch emit plt call euler inclusion category: feature Bugzilla: 5507 CVE: N/A ---------------------------------------- The livepatch without-ftrace mode uses the direct jump method to implement the livepatch. When KASLR is enabled, the address of a symbol which needs relocation in the module may exceed the range of a short jump. In modules, this is handled by the PLT sections. In previous versions, the kpatch-build front-end tools created a section named livepatch.pltcount to store the number of relocations in its size field, and we appended enough space in the .plt section for the long-jump PLTs via module_frob_arch_sections. Now this is no longer needed. The .klp.rela.objname.secname section stores all symbols that require relocation by livepatch. Since commit 425595a7fc20 ("livepatch: reuse module loader code to write relocations") was merged, load_module can create enough PLT entries for livepatch via module_frob_arch_sections. We will fix it soon. 
Signed-off-by: Cheng Jian Reviewed-by: Li Bin Signed-off-by: Yang Yingliang --- arch/arm64/include/asm/module.h | 8 ++++++ arch/arm64/kernel/module-plts.c | 46 +++++++++++++++++++++++++++++++++ arch/arm64/kernel/module.c | 8 +++++- kernel/module.c | 6 +++++ 4 files changed, 67 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h index 97d0ef12e2ff..b14c1beb98c3 100644 --- a/arch/arm64/include/asm/module.h +++ b/arch/arm64/include/asm/module.h @@ -33,6 +33,11 @@ struct mod_arch_specific { /* for CONFIG_DYNAMIC_FTRACE */ struct plt_entry *ftrace_trampoline; + +#ifdef CONFIG_LIVEPATCH + struct plt_entry *core_plts; + bool have_plts; +#endif }; #endif @@ -41,6 +46,9 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val); +u64 livepatch_emit_plt_entry(struct module *mod, void *loc, + const Elf64_Rela *rela, Elf64_Sym *sym); + #ifdef CONFIG_RANDOMIZE_BASE extern u64 module_alloc_base; #else diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c index a32540647c85..d83383330778 100644 --- a/arch/arm64/kernel/module-plts.c +++ b/arch/arm64/kernel/module-plts.c @@ -42,6 +42,48 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, return (u64)&plt[i]; } +#ifdef CONFIG_LIVEPATCH +void klp_get_core_plts(struct module *mod) +{ + if (is_livepatch_module(mod) && mod->arch.have_plts) + mod->arch.core_plts = (struct plt_entry *) + mod->arch.core.plt->sh_addr; +} + +u64 livepatch_emit_plt_entry(struct module *mod, void *loc, + const Elf64_Rela *rela, Elf64_Sym *sym) +{ + struct mod_plt_sec *pltsec = &mod->arch.core; + struct plt_entry *plt = (struct plt_entry *)mod->arch.core_plts; + int i = pltsec->plt_num_entries; + u64 val = sym->st_value + rela->r_addend; + + plt[i] = get_plt_entry(val); + + /* + * Check if the entry we just created is a duplicate. 
Given that the + * relocations are sorted, this will be the last entry we allocated. + * (if one exists). + */ + if (i > 0 && plt_entries_equal(plt + i, plt + i - 1)) + return (u64)&plt[i - 1]; + + pltsec->plt_num_entries++; + if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries)) + return 0; + + return (u64)&plt[i]; +} +#else +u64 livepatch_emit_plt_entry(struct module *mod, void *loc, + const Elf64_Rela *rela, Elf64_Sym *sym) +{ + WARN(1, "Live patching support is disabled, but catch SHF_RELA_LIVEPATCH relocation\n"); + + return 0; +} +#endif /* #ifdef CONFIG_LIVEPATCH */ + #ifdef CONFIG_ARM64_ERRATUM_843419 u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val) { @@ -265,6 +307,10 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, break; } } + + if (mod->arch.core.plt) + mod->arch.have_plts = true; + mod->arch.core_plts = NULL; #endif mod->arch.core.plt->sh_type = SHT_NOBITS; diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index f0f27aeefb73..fedd1bac58c2 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -413,7 +413,13 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && ovf == -ERANGE) { - val = module_emit_plt_entry(me, loc, &rel[i], sym); + if (!(sechdrs[relsec].sh_flags + & SHF_RELA_LIVEPATCH)) + val = module_emit_plt_entry(me, + loc, &rel[i], sym); + else + val = livepatch_emit_plt_entry(me, + loc, &rel[i], sym); if (!val) return -ENOEXEC; ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, diff --git a/kernel/module.c b/kernel/module.c index 6746c85511fe..37e8092233e7 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2036,6 +2036,10 @@ static void module_disable_nx(const struct module *mod) { } #endif #ifdef CONFIG_LIVEPATCH +void __weak klp_get_core_plts(struct module *mod) +{ +} + /* * Persist Elf information about a module. 
Copy the Elf header, * section header table, section string table, and symtab section @@ -2084,6 +2088,8 @@ static int copy_module_elf(struct module *mod, struct load_info *info) mod->klp_info->sechdrs[symndx].sh_addr = \ (unsigned long) mod->core_kallsyms.symtab; + klp_get_core_plts(mod); + return 0; free_sechdrs: -- GitLab