diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 4fa39d1bcf0a2488b4286abdf0080926a56856a4..0458ea044490307c951d6f5a65e62199f5906bbc 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -166,6 +166,7 @@ config ARM64 select SWIOTLB select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK + select HAVE_LIVEPATCH_WO_FTRACE if !ARM64_ERRATUM_843419 help ARM 64-bit (AArch64) Linux support. @@ -282,6 +283,8 @@ config ARCH_PROC_KCORE_TEXT source "arch/arm64/Kconfig.platforms" +source "kernel/livepatch/Kconfig" + menu "Bus support" config PCI diff --git a/arch/arm64/include/asm/livepatch.h b/arch/arm64/include/asm/livepatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5817b1e6aef5ffc401357a8c41e92c76ac5f472d --- /dev/null +++ b/arch/arm64/include/asm/livepatch.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2014-2019, Huawei. + * Author: Li Bin + * Author: Cheng Jian + * + * livepatch.h - arm64-specific Kernel Live Patching Core + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#ifndef _ASM_ARM64_LIVEPATCH_H +#define _ASM_ARM64_LIVEPATCH_H + +#include <linux/module.h> +#include <linux/ftrace.h> + + +#ifdef CONFIG_LIVEPATCH + +struct klp_patch; +struct klp_func; + +#define klp_smp_isb() isb() + +static inline int klp_check_compiler_support(void) +{ + return 0; +} + +int arch_klp_patch_func(struct klp_func *func); +void arch_klp_unpatch_func(struct klp_func *func); +int klp_check_calltrace(struct klp_patch *patch, int enable); +#else +#error Live patching support is disabled; check CONFIG_LIVEPATCH +#endif + +#endif /* _ASM_ARM64_LIVEPATCH_H */ diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index d33434181626c394da6deec44dfbcd97c364ae5f..74369475cde70c6387cba62efb78e4f707c8c203 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -37,6 +37,7 @@ arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o arm64-obj-$(CONFIG_CPU_PM) += sleep.o suspend.o arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o +arm64-obj-$(CONFIG_LIVEPATCH) += livepatch.o arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o arm64-obj-$(CONFIG_KGDB) += kgdb.o arm64-obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o \ diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c new file mode 100644 index 0000000000000000000000000000000000000000..a21c3f79d3cc1e20138a4cfd93344017e9446ccb --- /dev/null +++ b/arch/arm64/kernel/livepatch.c @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * livepatch.c - arm64-specific Kernel Live Patching Core + * + * Copyright (C) 2014 Li Bin + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_LIVEPATCH_WO_FTRACE +#include +#endif + +#ifdef CONFIG_ARM64_MODULE_PLTS +static inline bool offset_in_range(unsigned long pc, unsigned long addr, + long range) +{ + long offset = addr - pc; + + return (offset >= -range && offset < range); +} +#endif + +struct walk_stackframe_args { + struct klp_patch *patch; + int enable; + int ret; +}; + +static inline int klp_compare_address(unsigned long pc, unsigned long func_addr, + unsigned long func_size, const char *func_name) +{ + if (pc >= func_addr && pc < func_addr + func_size) { + pr_err("func %s is in use!\n", func_name); + return -EBUSY; + } + return 0; +} + +static int klp_check_activeness_func(struct stackframe *frame, void *data) +{ + struct walk_stackframe_args *args = data; + struct klp_patch *patch = args->patch; + struct klp_object *obj; + struct klp_func *func; + unsigned long func_addr, func_size; + const char *func_name; + + if (args->ret) + return args->ret; + + for (obj = patch->objs; obj->funcs; obj++) { + for (func = obj->funcs; func->old_name; func++) { + if (args->enable) { + func_addr = func->old_addr; + func_size = func->old_size; + } else { + func_addr = (unsigned long)func->new_func; + func_size = func->new_size; + } + func_name = func->old_name; + args->ret = klp_compare_address(frame->pc, func_addr, + func_size, func_name); + if (args->ret) + return args->ret; + } + } + + return args->ret; +} + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +static unsigned long 
klp_ftrace_graph_addr(unsigned long addr, + struct task_struct *tsk, + int *graph) +{ + unsigned long ret_addr = 0; + int index = tsk->curr_ret_stack; + + if ((addr + 4) != (unsigned long)return_to_handler) + return ret_addr; + + if (!tsk->ret_stack || index < *graph) + return ret_addr; + + index -= *graph; + ret_addr = tsk->ret_stack[index].ret; + + (*graph)++; + return ret_addr; +} +#else +static unsigned long klp_ftrace_graph_addr(unsigned long addr, + struct task_struct *tsk, + int *graph) +{ + return 0; +} +#endif + +void notrace klp_walk_stackframe(struct stackframe *frame, + int (*fn)(struct stackframe *, void *), + struct task_struct *tsk, void *data) +{ + unsigned long addr; + int graph = 0; + + while (1) { + int ret; + + if (fn(frame, data)) + break; + ret = unwind_frame(NULL, frame); + if (ret < 0) + break; + + addr = klp_ftrace_graph_addr(frame->pc, tsk, &graph); + if (addr) + frame->pc = addr; + } +} + +int klp_check_calltrace(struct klp_patch *patch, int enable) +{ + struct task_struct *g, *t; + struct stackframe frame; + int ret = 0; + + struct walk_stackframe_args args = { + .patch = patch, + .enable = enable, + .ret = 0 + }; + + do_each_thread(g, t) { + frame.fp = thread_saved_fp(t); + frame.pc = thread_saved_pc(t); + klp_walk_stackframe(&frame, klp_check_activeness_func, + t, &args); + if (args.ret) { + ret = args.ret; + pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); + show_stack(t, NULL); + goto out; + } + } while_each_thread(g, t); + +out: + return ret; +} + +#ifdef CONFIG_LIVEPATCH_WO_FTRACE +#define LJMP_INSN_SIZE 4 + +struct klp_func_node { + struct list_head node; + struct list_head func_stack; + unsigned long old_addr; +#ifdef CONFIG_ARM64_MODULE_PLTS + u32 old_insns[LJMP_INSN_SIZE]; +#else + u32 old_insn; +#endif +}; + +static LIST_HEAD(klp_func_list); + +static struct klp_func_node *klp_find_func_node(unsigned long old_addr) +{ + struct klp_func_node *func_node; + + list_for_each_entry(func_node, &klp_func_list, node) { + if 
(func_node->old_addr == old_addr) + return func_node; + } + + return NULL; +} + +int arch_klp_patch_func(struct klp_func *func) +{ + struct klp_func_node *func_node; + unsigned long pc, new_addr; + u32 insn; + u32 memory_flag = 0; +#ifdef CONFIG_ARM64_MODULE_PLTS + int i; + u32 insns[LJMP_INSN_SIZE]; +#endif + + func_node = klp_find_func_node(func->old_addr); + if (!func_node) { + func_node = kzalloc(sizeof(*func_node), GFP_KERNEL); + if (!func_node) + return -ENOMEM; + memory_flag = 1; + + INIT_LIST_HEAD(&func_node->func_stack); + func_node->old_addr = func->old_addr; + +#ifdef CONFIG_ARM64_MODULE_PLTS + for (i = 0; i < LJMP_INSN_SIZE; i++) { + aarch64_insn_read(((u32 *)func->old_addr) + i, + &func_node->old_insns[i]); + } +#else + aarch64_insn_read((void *)func->old_addr, &func_node->old_insn); +#endif + + list_add_rcu(&func_node->node, &klp_func_list); + } + + list_add_rcu(&func->stack_node, &func_node->func_stack); + + pc = func->old_addr; + new_addr = (unsigned long)func->new_func; + +#ifdef CONFIG_ARM64_MODULE_PLTS + if (offset_in_range(pc, new_addr, SZ_128M)) { + insn = aarch64_insn_gen_branch_imm(pc, new_addr, + AARCH64_INSN_BRANCH_NOLINK); + if (aarch64_insn_patch_text_nosync((void *)pc, insn)) + goto ERR_OUT; + } else { + insns[0] = cpu_to_le32(0x92800010 | (((~new_addr) & 0xffff)) << 5); + insns[1] = cpu_to_le32(0xf2a00010 | (((new_addr >> 16) & 0xffff)) << 5); + insns[2] = cpu_to_le32(0xf2c00010 | (((new_addr >> 32) & 0xffff)) << 5); + insns[3] = cpu_to_le32(0xd61f0200); + for (i = 0; i < LJMP_INSN_SIZE; i++) { + if (aarch64_insn_patch_text_nosync(((u32 *)pc) + i, insns[i])) + goto ERR_OUT; + } + } +#else + insn = aarch64_insn_gen_branch_imm(pc, new_addr, + AARCH64_INSN_BRANCH_NOLINK); + + if (aarch64_insn_patch_text_nosync((void *)pc, insn)) + goto ERR_OUT; +#endif + return 0; +ERR_OUT: + if (memory_flag) { + list_del_rcu(&func->stack_node); + list_del_rcu(&func_node->node); + kfree(func_node); + } + + return -EPERM; +} + +void 
arch_klp_unpatch_func(struct klp_func *func) +{ + struct klp_func_node *func_node; + struct klp_func *next_func; + unsigned long pc, new_addr; + u32 insn; +#ifdef CONFIG_ARM64_MODULE_PLTS + int i; + u32 insns[LJMP_INSN_SIZE]; +#endif + func_node = klp_find_func_node(func->old_addr); + BUG_ON(!func_node); + pc = func_node->old_addr; + if (list_is_singular(&func_node->func_stack)) { +#ifdef CONFIG_ARM64_MODULE_PLTS + for (i = 0; i < LJMP_INSN_SIZE; i++) + insns[i] = func_node->old_insns[i]; +#else + insn = func_node->old_insn; +#endif + list_del_rcu(&func->stack_node); + list_del_rcu(&func_node->node); + kfree(func_node); + +#ifdef CONFIG_ARM64_MODULE_PLTS + for (i = 0; i < LJMP_INSN_SIZE; i++) { + aarch64_insn_patch_text_nosync(((u32 *)pc) + i, + insns[i]); + } +#else + aarch64_insn_patch_text_nosync((void *)pc, insn); +#endif + } else { + list_del_rcu(&func->stack_node); + next_func = list_first_or_null_rcu(&func_node->func_stack, + struct klp_func, stack_node); + BUG_ON(!next_func); + new_addr = (unsigned long)next_func->new_func; +#ifdef CONFIG_ARM64_MODULE_PLTS + if (offset_in_range(pc, new_addr, SZ_128M)) { + insn = aarch64_insn_gen_branch_imm(pc, new_addr, + AARCH64_INSN_BRANCH_NOLINK); + + aarch64_insn_patch_text_nosync((void *)pc, insn); + } else { + insns[0] = cpu_to_le32(0x92800010 | (((~new_addr) & 0xffff)) << 5); + insns[1] = cpu_to_le32(0xf2a00010 | (((new_addr >> 16) & 0xffff)) << 5); + insns[2] = cpu_to_le32(0xf2c00010 | (((new_addr >> 32) & 0xffff)) << 5); + insns[3] = cpu_to_le32(0xd61f0200); + for (i = 0; i < LJMP_INSN_SIZE; i++) + aarch64_insn_patch_text_nosync(((u32 *)pc) + i, + insns[i]); + } +#else + insn = aarch64_insn_gen_branch_imm(pc, new_addr, + AARCH64_INSN_BRANCH_NOLINK); + + aarch64_insn_patch_text_nosync((void *)pc, insn); +#endif + } +} +#endif diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c index f0690c2ca3e0f2f27f761a1568e6b3cc637779a4..a32540647c8530c3dd9b4e2d9409a8f53bf78cc8 100644 --- 
a/arch/arm64/kernel/module-plts.c +++ b/arch/arm64/kernel/module-plts.c @@ -254,6 +254,19 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, sechdrs[i].sh_info, dstsec); } +#ifdef CONFIG_LIVEPATCH + for (i = 0; i < ehdr->e_shnum; i++) { + if (!strcmp(".livepatch.pltcount", + secstrings + sechdrs[i].sh_name)) { + core_plts += sechdrs[i].sh_size; + sechdrs[i].sh_size = 0; + sechdrs[i].sh_type = SHT_NOBITS; + sechdrs[i].sh_flags = 0; + break; + } + } +#endif + mod->arch.core.plt->sh_type = SHT_NOBITS; mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES; diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 95f98aca75b998c8dcba31c1c98d547916f4e7bd..4b481176681a39024b68ad67e3f11ed3368fe4b5 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -510,13 +510,6 @@ static int enable_patch(struct klp_patch *patch) if (!klp_is_object_loaded(obj)) continue; - ret = klp_pre_patch_callback(obj); - if (ret) { - pr_warn("pre-patch callback failed for object '%s'\n", - klp_is_module(obj) ? obj->name : "vmlinux"); - goto disable; - } - ret = klp_patch_object(obj); if (ret) { pr_warn("failed to patch object '%s'\n",