Commit 5aa9a1a3 authored by Cheng Jian, committed by Xie XiuQi

livepatch/arm64: support livepatch without ftrace

euler inclusion
category: feature
Bugzilla: 5507
CVE: N/A

----------------------------------------

Support livepatch without ftrace for ARM64.

Supported now:
        livepatch relocation when init_patch runs after load_module;
        instruction patching on enable;
        activeness check for patched functions;
        enforcement of the patch stacking principle;
        long jump (for both livepatch relocation and instruction
        patching; see the sketch below);
        module PLT entries requested by livepatch relocation.
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Reviewed-by: Li Bin <huawei.libin@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
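For context, the long-jump sequence this patch emits when the replacement
function lies outside the ±128 MB range of an AArch64 B instruction expands
to the following (the encodings match the ones hard-coded in livepatch.c
below; x16 is the scratch register):

/*
 *	movn	x16, #(~new_addr & 0xffff)
 *	movk	x16, #((new_addr >> 16) & 0xffff), lsl #16
 *	movk	x16, #((new_addr >> 32) & 0xffff), lsl #32
 *	br	x16
 *
 * MOVN writes the bitwise NOT of its immediate, so bits 48-63 of x16
 * come out all-ones, which is what kernel virtual addresses require;
 * the two MOVKs then fill in bits 16-47.
 */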
Parent 1348c3cc
@@ -166,6 +166,7 @@ config ARM64
 	select SWIOTLB
 	select SYSCTL_EXCEPTION_TRACE
 	select THREAD_INFO_IN_TASK
+	select HAVE_LIVEPATCH_WO_FTRACE if !ARM64_ERRATUM_843419
 	help
 	  ARM 64-bit (AArch64) Linux support.
@@ -282,6 +283,8 @@ config ARCH_PROC_KCORE_TEXT
 source "arch/arm64/Kconfig.platforms"
 
+source "kernel/livepatch/Kconfig"
+
 menu "Bus support"
 
 config PCI
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2014-2019, Huawei.
 * Author: Li Bin <huawei.libin@huawei.com>
 * Author: Cheng Jian <cj.chengjian@huawei.com>
 *
 * livepatch.h - arm64-specific Kernel Live Patching Core
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef _ASM_ARM64_LIVEPATCH_H
#define _ASM_ARM64_LIVEPATCH_H

#include <linux/module.h>
#include <linux/livepatch.h>

#ifdef CONFIG_LIVEPATCH

struct klp_patch;
struct klp_func;

#define klp_smp_isb() isb()

static inline int klp_check_compiler_support(void)
{
	return 0;
}

int arch_klp_patch_func(struct klp_func *func);
void arch_klp_unpatch_func(struct klp_func *func);

int klp_check_calltrace(struct klp_patch *patch, int enable);
#else
#error Live patching support is disabled; check CONFIG_LIVEPATCH
#endif

#endif /* _ASM_ARM64_LIVEPATCH_H */
@@ -37,6 +37,7 @@ arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
 arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
 arm64-obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
 arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
+arm64-obj-$(CONFIG_LIVEPATCH) += livepatch.o
 arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
 arm64-obj-$(CONFIG_KGDB) += kgdb.o
 arm64-obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o \
......
// SPDX-License-Identifier: GPL-2.0
/*
 * livepatch.c - arm64-specific Kernel Live Patching Core
 *
 * Copyright (C) 2014 Li Bin <huawei.libin@huawei.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/livepatch.h>
#include <linux/slab.h>
#include <linux/ftrace.h>
#include <asm/livepatch.h>
#include <asm/stacktrace.h>
#include <asm/cacheflush.h>
#include <asm/insn.h>
#include <asm-generic/sections.h>
#include <asm/ptrace.h>
#ifdef CONFIG_LIVEPATCH_WO_FTRACE
#include <linux/sched/debug.h>
#endif

#ifdef CONFIG_ARM64_MODULE_PLTS
static inline bool offset_in_range(unsigned long pc, unsigned long addr,
				   long range)
{
	long offset = addr - pc;

	return (offset >= -range && offset < range);
}
#endif
struct walk_stackframe_args {
	struct klp_patch *patch;
	int enable;
	int ret;
};

static inline int klp_compare_address(unsigned long pc, unsigned long func_addr,
				      unsigned long func_size,
				      const char *func_name)
{
	if (pc >= func_addr && pc < func_addr + func_size) {
		pr_err("func %s is in use!\n", func_name);
		return -EBUSY;
	}
	return 0;
}
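/*
 * Stack-walk callback: fail with -EBUSY if any saved PC on the task's
 * stack falls inside a function that is about to be patched (on enable)
 * or inside a replacement function that is about to be removed (on
 * disable).
 */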
static int klp_check_activeness_func(struct stackframe *frame, void *data)
{
	struct walk_stackframe_args *args = data;
	struct klp_patch *patch = args->patch;
	struct klp_object *obj;
	struct klp_func *func;
	unsigned long func_addr, func_size;
	const char *func_name;

	if (args->ret)
		return args->ret;

	for (obj = patch->objs; obj->funcs; obj++) {
		for (func = obj->funcs; func->old_name; func++) {
			if (args->enable) {
				func_addr = func->old_addr;
				func_size = func->old_size;
			} else {
				func_addr = (unsigned long)func->new_func;
				func_size = func->new_size;
			}
			func_name = func->old_name;
			args->ret = klp_compare_address(frame->pc, func_addr,
							func_size, func_name);
			if (args->ret)
				return args->ret;
		}
	}

	return args->ret;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static unsigned long klp_ftrace_graph_addr(unsigned long addr,
					   struct task_struct *tsk,
					   int *graph)
{
	unsigned long ret_addr = 0;
	int index = tsk->curr_ret_stack;

	if ((addr + 4) != (unsigned long)return_to_handler)
		return ret_addr;

	if (!tsk->ret_stack || index < *graph)
		return ret_addr;

	index -= *graph;
	ret_addr = tsk->ret_stack[index].ret;
	(*graph)++;

	return ret_addr;
}
#else
static unsigned long klp_ftrace_graph_addr(unsigned long addr,
					   struct task_struct *tsk,
					   int *graph)
{
	return 0;
}
#endif
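/*
 * Walk one task's stack, invoking fn on every frame.  When the function
 * graph tracer has redirected a saved return address to
 * return_to_handler, substitute the real return address from the
 * task's ret_stack so the activeness check sees the true call chain.
 */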
void notrace klp_walk_stackframe(struct stackframe *frame,
				 int (*fn)(struct stackframe *, void *),
				 struct task_struct *tsk, void *data)
{
	unsigned long addr;
	int graph = 0;

	while (1) {
		int ret;

		if (fn(frame, data))
			break;
		ret = unwind_frame(NULL, frame);
		if (ret < 0)
			break;
		addr = klp_ftrace_graph_addr(frame->pc, tsk, &graph);
		if (addr)
			frame->pc = addr;
	}
}
int klp_check_calltrace(struct klp_patch *patch, int enable)
{
	struct task_struct *g, *t;
	struct stackframe frame;
	int ret = 0;
	struct walk_stackframe_args args = {
		.patch = patch,
		.enable = enable,
		.ret = 0
	};

	do_each_thread(g, t) {
		frame.fp = thread_saved_fp(t);
		frame.pc = thread_saved_pc(t);
		klp_walk_stackframe(&frame, klp_check_activeness_func,
				    t, &args);
		if (args.ret) {
			ret = args.ret;
			pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm);
			show_stack(t, NULL);
			goto out;
		}
	} while_each_thread(g, t);

out:
	return ret;
}
#ifdef CONFIG_LIVEPATCH_WO_FTRACE

#define LJMP_INSN_SIZE 4

struct klp_func_node {
	struct list_head node;
	struct list_head func_stack;
	unsigned long old_addr;
#ifdef CONFIG_ARM64_MODULE_PLTS
	u32 old_insns[LJMP_INSN_SIZE];
#else
	u32 old_insn;
#endif
};

static LIST_HEAD(klp_func_list);

static struct klp_func_node *klp_find_func_node(unsigned long old_addr)
{
	struct klp_func_node *func_node;

	list_for_each_entry(func_node, &klp_func_list, node) {
		if (func_node->old_addr == old_addr)
			return func_node;
	}

	return NULL;
}
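/*
 * Patch one function: the first time a function is patched, save its
 * original instruction(s) in a klp_func_node so they can be restored
 * on unpatch; push this klp_func onto the per-address stack (patch
 * stacking); then overwrite the function entry with a branch to the
 * replacement.
 */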
int arch_klp_patch_func(struct klp_func *func)
{
	struct klp_func_node *func_node;
	unsigned long pc, new_addr;
	u32 insn;
	u32 memory_flag = 0;	/* set when func_node is allocated here */
#ifdef CONFIG_ARM64_MODULE_PLTS
	int i;
	u32 insns[LJMP_INSN_SIZE];
#endif

	func_node = klp_find_func_node(func->old_addr);
	if (!func_node) {
		func_node = kzalloc(sizeof(*func_node), GFP_KERNEL);
		if (!func_node)
			return -ENOMEM;

		memory_flag = 1;
		INIT_LIST_HEAD(&func_node->func_stack);
		func_node->old_addr = func->old_addr;
#ifdef CONFIG_ARM64_MODULE_PLTS
		for (i = 0; i < LJMP_INSN_SIZE; i++) {
			aarch64_insn_read(((u32 *)func->old_addr) + i,
					  &func_node->old_insns[i]);
		}
#else
		aarch64_insn_read((void *)func->old_addr, &func_node->old_insn);
#endif
		list_add_rcu(&func_node->node, &klp_func_list);
	}

	list_add_rcu(&func->stack_node, &func_node->func_stack);

	pc = func->old_addr;
	new_addr = (unsigned long)func->new_func;

#ifdef CONFIG_ARM64_MODULE_PLTS
	if (offset_in_range(pc, new_addr, SZ_128M)) {
		insn = aarch64_insn_gen_branch_imm(pc, new_addr,
						   AARCH64_INSN_BRANCH_NOLINK);
		if (aarch64_insn_patch_text_nosync((void *)pc, insn))
			goto ERR_OUT;
	} else {
		/*
		 * Target out of B-branch range (+/-128M): emit the long
		 * jump movn/movk/movk x16 + br x16 (see commit message).
		 */
		insns[0] = cpu_to_le32(0x92800010 | (((~new_addr) & 0xffff)) << 5);
		insns[1] = cpu_to_le32(0xf2a00010 | (((new_addr >> 16) & 0xffff)) << 5);
		insns[2] = cpu_to_le32(0xf2c00010 | (((new_addr >> 32) & 0xffff)) << 5);
		insns[3] = cpu_to_le32(0xd61f0200);
		for (i = 0; i < LJMP_INSN_SIZE; i++) {
			if (aarch64_insn_patch_text_nosync(((u32 *)pc) + i, insns[i]))
				goto ERR_OUT;
		}
	}
#else
	insn = aarch64_insn_gen_branch_imm(pc, new_addr,
					   AARCH64_INSN_BRANCH_NOLINK);
	if (aarch64_insn_patch_text_nosync((void *)pc, insn))
		goto ERR_OUT;
#endif
	return 0;

ERR_OUT:
	/*
	 * Always undo the stack_node insertion done above; free the
	 * node only if this call allocated it.
	 */
	list_del_rcu(&func->stack_node);
	if (memory_flag) {
		list_del_rcu(&func_node->node);
		kfree(func_node);
	}

	return -EPERM;
}
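/*
 * Unpatch one function: if this was the only patch stacked on the
 * function, restore the saved original instruction(s) and free the
 * node; otherwise re-route the function entry to the next-most-recent
 * replacement on the stack.
 */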
void arch_klp_unpatch_func(struct klp_func *func)
{
	struct klp_func_node *func_node;
	struct klp_func *next_func;
	unsigned long pc, new_addr;
	u32 insn;
#ifdef CONFIG_ARM64_MODULE_PLTS
	int i;
	u32 insns[LJMP_INSN_SIZE];
#endif

	func_node = klp_find_func_node(func->old_addr);
	BUG_ON(!func_node);

	pc = func_node->old_addr;
	if (list_is_singular(&func_node->func_stack)) {
#ifdef CONFIG_ARM64_MODULE_PLTS
		for (i = 0; i < LJMP_INSN_SIZE; i++)
			insns[i] = func_node->old_insns[i];
#else
		insn = func_node->old_insn;
#endif
		list_del_rcu(&func->stack_node);
		list_del_rcu(&func_node->node);
		kfree(func_node);
#ifdef CONFIG_ARM64_MODULE_PLTS
		for (i = 0; i < LJMP_INSN_SIZE; i++) {
			aarch64_insn_patch_text_nosync(((u32 *)pc) + i,
						       insns[i]);
		}
#else
		aarch64_insn_patch_text_nosync((void *)pc, insn);
#endif
	} else {
		list_del_rcu(&func->stack_node);
		next_func = list_first_or_null_rcu(&func_node->func_stack,
						   struct klp_func, stack_node);
		BUG_ON(!next_func);

		new_addr = (unsigned long)next_func->new_func;
#ifdef CONFIG_ARM64_MODULE_PLTS
		if (offset_in_range(pc, new_addr, SZ_128M)) {
			insn = aarch64_insn_gen_branch_imm(pc, new_addr,
							   AARCH64_INSN_BRANCH_NOLINK);
			aarch64_insn_patch_text_nosync((void *)pc, insn);
		} else {
			insns[0] = cpu_to_le32(0x92800010 | (((~new_addr) & 0xffff)) << 5);
			insns[1] = cpu_to_le32(0xf2a00010 | (((new_addr >> 16) & 0xffff)) << 5);
			insns[2] = cpu_to_le32(0xf2c00010 | (((new_addr >> 32) & 0xffff)) << 5);
			insns[3] = cpu_to_le32(0xd61f0200);
			for (i = 0; i < LJMP_INSN_SIZE; i++)
				aarch64_insn_patch_text_nosync(((u32 *)pc) + i,
							       insns[i]);
		}
#else
		insn = aarch64_insn_gen_branch_imm(pc, new_addr,
						   AARCH64_INSN_BRANCH_NOLINK);
		aarch64_insn_patch_text_nosync((void *)pc, insn);
#endif
	}
}
#endif
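For orientation, this is how a livepatch module would exercise the arch
support above; a minimal sketch in the style of samples/livepatch, assuming
a hypothetical vmlinux target symbol "orig_func" and the
klp_register_patch()/klp_enable_patch() API of this kernel generation (in
this tree the enable step, which is what reaches klp_check_calltrace() and
arch_klp_patch_func(), may instead be driven through the sysfs "enabled"
attribute):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/livepatch.h>

/* Replacement body; "orig_func" is a hypothetical patch target. */
static int new_func(void)
{
	pr_info("patched orig_func called\n");
	return 0;
}

static struct klp_func funcs[] = {
	{
		.old_name = "orig_func",	/* hypothetical symbol */
		.new_func = new_func,
	}, { }
};

static struct klp_object objs[] = {
	{
		/* name == NULL targets vmlinux */
		.funcs = funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

static int __init livepatch_init(void)
{
	int ret;

	ret = klp_register_patch(&patch);
	if (ret)
		return ret;
	/* Enabling ends up in klp_check_calltrace()/arch_klp_patch_func(). */
	ret = klp_enable_patch(&patch);
	if (ret) {
		WARN_ON(klp_unregister_patch(&patch));
		return ret;
	}
	return 0;
}

static void __exit livepatch_exit(void)
{
	WARN_ON(klp_unregister_patch(&patch));
}

module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");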
@@ -254,6 +254,19 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 			sechdrs[i].sh_info, dstsec);
 	}
 
+#ifdef CONFIG_LIVEPATCH
+	for (i = 0; i < ehdr->e_shnum; i++) {
+		if (!strcmp(".livepatch.pltcount",
+			    secstrings + sechdrs[i].sh_name)) {
+			core_plts += sechdrs[i].sh_size;
+			sechdrs[i].sh_size = 0;
+			sechdrs[i].sh_type = SHT_NOBITS;
+			sechdrs[i].sh_flags = 0;
+			break;
+		}
+	}
+#endif
+
 	mod->arch.core.plt->sh_type = SHT_NOBITS;
 	mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
 	mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
......
@@ -510,13 +510,6 @@ static int enable_patch(struct klp_patch *patch)
 		if (!klp_is_object_loaded(obj))
 			continue;
 
-		ret = klp_pre_patch_callback(obj);
-		if (ret) {
-			pr_warn("pre-patch callback failed for object '%s'\n",
-				klp_is_module(obj) ? obj->name : "vmlinux");
-			goto disable;
-		}
-
 		ret = klp_patch_object(obj);
 		if (ret) {
 			pr_warn("failed to patch object '%s'\n",
......