Commit 1348c3cc authored by Cheng Jian, committed by Xie XiuQi

livepatch/core: allow implementation without ftrace

euler inclusion
category: feature
Bugzilla: 5507
CVE: N/A

----------------------------------------

Add support for livepatch without ftrace (WO_FTRACE mode).

New config options for the WO_FTRACE mode:
	CONFIG_LIVEPATCH_WO_FTRACE=y
	CONFIG_LIVEPATCH_STACK=y

Livepatch without ftrace is implemented by direct jump: under
stop_machine() we overwrite the first few instructions of the old
function (usually one, but four for long jumps on ARM64) with a jump
instruction, so that once the livepatch is enabled, calls to the old
function jump straight to the first address of the new function.

KERNEL/MODULE
call/bl A---------------old_A------------
                        | jump new_A----+--------|
                        |               |        |
                        |               |        |
                        -----------------        |
                                                 |
                                                 |
                                                 |
livepatch_module-------------                    |
|                           |                    |
|new_A <--------------------+--------------------|
|                           |
|                           |
|---------------------------|
| .plt                      |
| ......PLTS for livepatch  |
-----------------------------

Things that need architecture-specific consideration:

1. the jump instruction itself (sketched below)
2. the partial relocations that the new function requires for livepatch
3. long jumps, which may be needed when the target address exceeds the
   reach of a single branch; this affects both livepatch relocation and
   livepatch enable
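
To make point 1 concrete, the following is a minimal, hypothetical C
sketch of the two arch hooks the WO_FTRACE path relies on
(arch_klp_patch_func()/arch_klp_unpatch_func()), written against the
existing arm64 helpers aarch64_insn_read(), aarch64_insn_gen_branch_imm()
and aarch64_insn_patch_text_nosync(); the single static backup slot and
the error handling are simplifying assumptions for illustration, not the
actual arch implementation added by this patch:

/*
 * Illustrative sketch only -- NOT the arch implementation from this
 * patch.  Runs in stop_machine() context, so no other CPU can be
 * executing the old function while its first instruction is rewritten.
 */
#include <linux/livepatch.h>
#include <asm/insn.h>

static u32 saved_first_insn;	/* assumption: one patched func, for brevity */

int arch_klp_patch_func(struct klp_func *func)
{
	unsigned long pc = func->old_addr;
	u32 insn;
	int ret;

	/* save the original first instruction so the patch can be disabled */
	ret = aarch64_insn_read((void *)pc, &saved_first_insn);
	if (ret)
		return ret;

	/* encode "b new_func" to be placed at the old function's entry */
	insn = aarch64_insn_gen_branch_imm(pc, (unsigned long)func->new_func,
					   AARCH64_INSN_BRANCH_NOLINK);

	/*
	 * Write it.  A single B instruction reaches +/-128M; beyond that a
	 * multi-instruction long jump (point 3 above) would be required.
	 */
	return aarch64_insn_patch_text_nosync((void *)pc, insn);
}

void arch_klp_unpatch_func(struct klp_func *func)
{
	/* restore the saved instruction when the patch is disabled */
	aarch64_insn_patch_text_nosync((void *)func->old_addr,
				       saved_first_insn);
}

In the WO_FTRACE path, klp_patch_func()/klp_unpatch_func() in
kernel/livepatch/patch.c (see the hunk further down) are what invoke
these arch hooks.
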
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Reviewed-by: Li Bin <huawei.libin@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 1f6836e2
......@@ -202,7 +202,7 @@ config PPC
select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_LIVEPATCH_FTRACE if HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MOD_ARCH_SPECIFIC
......
......@@ -151,7 +151,7 @@ config S390
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_KVM
select HAVE_LIVEPATCH
select HAVE_LIVEPATCH_FTRACE
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_MEMBLOCK
......
......@@ -166,7 +166,7 @@ config X86
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_KRETPROBES
select HAVE_KVM
select HAVE_LIVEPATCH if X86_64
select HAVE_LIVEPATCH_FTRACE if X86_64
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MIXED_BREAKPOINTS_REGS
......
......@@ -82,7 +82,9 @@ struct klp_func {
struct list_head stack_node;
unsigned long old_size, new_size;
bool patched;
#ifdef CONFIG_LIVEPATCH_FTRACE
bool transition;
#endif
};
struct klp_object;
......@@ -168,6 +170,7 @@ int klp_disable_patch(struct klp_patch *);
void arch_klp_init_object_loaded(struct klp_patch *patch,
struct klp_object *obj);
#ifdef CONFIG_LIVEPATCH_FTRACE
/* Called from the module loader during module coming/going states */
int klp_module_coming(struct module *mod);
void klp_module_going(struct module *mod);
......@@ -201,8 +204,18 @@ void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor);
void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
#else /* !CONFIG_LIVEPATCH */
#else /* !CONFIG_LIVEPATCH_FTRACE */
static inline int klp_module_coming(struct module *mod) { return 0; }
static inline void klp_module_going(struct module *mod) {}
static inline bool klp_patch_pending(struct task_struct *task) { return false; }
static inline void klp_update_patch_state(struct task_struct *task) {}
static inline void klp_copy_process(struct task_struct *child) {}
static inline bool klp_have_reliable_stack(void) { return true; }
#endif /* CONFIG_LIVEPATCH_FTRACE */
#else /* !CONFIG_LIVEPATCH */
static inline int klp_module_coming(struct module *mod) { return 0; }
static inline void klp_module_going(struct module *mod) {}
static inline bool klp_patch_pending(struct task_struct *task) { return false; }
......
config HAVE_LIVEPATCH
config HAVE_LIVEPATCH_FTRACE
bool
help
Arch supports kernel live patching
Arch supports kernel live patching based on ftrace
config HAVE_LIVEPATCH_WO_FTRACE
bool
help
Arch supports kernel live patching without ftrace
if HAVE_LIVEPATCH_FTRACE || HAVE_LIVEPATCH_WO_FTRACE
menu "Enable Livepatch"
config LIVEPATCH
bool "Kernel Live Patching"
depends on DYNAMIC_FTRACE_WITH_REGS
depends on MODULES
depends on SYSFS
depends on KALLSYMS_ALL
depends on HAVE_LIVEPATCH
depends on HAVE_LIVEPATCH_FTRACE || HAVE_LIVEPATCH_WO_FTRACE
depends on !TRIM_UNUSED_KSYMS
default n
help
Say Y here if you want to support kernel live patching.
This option has no runtime impact until a kernel "patch"
module uses the interface provided by this option to register
a patch, causing calls to patched functions to be redirected
to new function code contained in the patch module.
choice
prompt "live patching method"
depends on LIVEPATCH
help
Live patching implementation method configuration.
config LIVEPATCH_FTRACE
bool "based on ftrace"
depends on HAVE_LIVEPATCH_FTRACE
depends on DYNAMIC_FTRACE_WITH_REGS
help
Supports kernel live patching based on ftrace
config LIVEPATCH_WO_FTRACE
bool "without ftrace"
depends on HAVE_LIVEPATCH_WO_FTRACE
depends on DEBUG_INFO
help
Supports kernel live patching without ftrace
endchoice
config LIVEPATCH_STACK
bool "Enforcing the patch stacking principle"
depends on LIVEPATCH_FTRACE || LIVEPATCH_WO_FTRACE
default y
help
Say N here if you want to remove the patch stacking principle.
endmenu
endif
obj-$(CONFIG_LIVEPATCH) += livepatch.o
obj-$(CONFIG_LIVEPATCH_FTRACE) += transition.o
obj-$(CONFIG_LIVEPATCH_FTRACE) += shadow.o
livepatch-objs := core.o patch.o shadow.o transition.o
livepatch-objs := core.o patch.o
......@@ -33,7 +33,14 @@
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#ifdef CONFIG_LIVEPATCH_FTRACE
#include "transition.h"
#endif
#ifdef CONFIG_LIVEPATCH_WO_FTRACE
#include <linux/stop_machine.h>
#endif
/*
* klp_mutex is a coarse lock which serializes access to klp data. All
......@@ -55,12 +62,12 @@ static bool klp_is_module(struct klp_object *obj)
}
/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
static int klp_find_object_module(struct klp_object *obj)
{
struct module *mod;
if (!klp_is_module(obj))
return;
return 0;
mutex_lock(&module_mutex);
/*
......@@ -76,10 +83,24 @@ static void klp_find_object_module(struct klp_object *obj)
* until mod->exit() finishes. This is especially important for
* patches that modify semantic of the functions.
*/
#ifdef CONFIG_LIVEPATCH_FTRACE
if (mod && mod->klp_alive)
obj->mod = mod;
#else
if (!mod) {
pr_err("module '%s' not loaded\n", obj->name);
mutex_unlock(&module_mutex);
return -ENOPKG; /* the depended-on module is not loaded */
} else if (mod->state == MODULE_STATE_COMING || !try_module_get(mod)) {
mutex_unlock(&module_mutex);
return -EINVAL;
} else {
obj->mod = mod;
}
#endif /* ifdef CONFIG_LIVEPATCH_FTRACE */
mutex_unlock(&module_mutex);
return 0;
}
static bool klp_is_patch_registered(struct klp_patch *patch)
......@@ -152,6 +173,8 @@ static int klp_find_object_symbol(const char *objname, const char *name,
kallsyms_on_each_symbol(klp_find_callback, &args);
mutex_unlock(&module_mutex);
cond_resched();
/*
* Ensure an address was found. If sympos is 0, ensure symbol is unique;
* otherwise ensure the symbol position count matches sympos.
......@@ -278,6 +301,7 @@ static int klp_write_object_relocations(struct module *pmod,
return ret;
}
#ifdef CONFIG_LIVEPATCH_FTRACE
static int __klp_disable_patch(struct klp_patch *patch)
{
struct klp_object *obj;
......@@ -288,10 +312,12 @@ static int __klp_disable_patch(struct klp_patch *patch)
if (klp_transition_patch)
return -EBUSY;
#ifdef CONFIG_LIVEPATCH_STACK
/* enforce stacking: only the last enabled patch can be disabled */
if (!list_is_last(&patch->list, &klp_patches) &&
list_next_entry(patch, list)->enabled)
return -EBUSY;
#endif
klp_init_transition(patch, KLP_UNPATCHED);
......@@ -314,6 +340,52 @@ static int __klp_disable_patch(struct klp_patch *patch)
return 0;
}
#else /* ifdef CONFIG_LIVEPATCH_WO_FTRACE */
/*
* This function is called from stop_machine() context.
*/
static int disable_patch(struct klp_patch *patch)
{
pr_notice("disabling patch '%s'\n", patch->mod->name);
klp_unpatch_objects(patch);
patch->enabled = false;
module_put(patch->mod);
return 0;
}
int klp_try_disable_patch(void *data)
{
struct klp_patch *patch = data;
int ret = 0;
ret = klp_check_calltrace(patch, 0);
if (ret)
return ret;
ret = disable_patch(patch);
return ret;
}
static int __klp_disable_patch(struct klp_patch *patch)
{
int ret;
#ifdef CONFIG_LIVEPATCH_STACK
/* enforce stacking: only the last enabled patch can be disabled */
if (!list_is_last(&patch->list, &klp_patches) &&
list_next_entry(patch, list)->enabled) {
pr_err("only the last enabled patch can be disabled\n");
return -EBUSY;
}
#endif
ret = stop_machine(klp_try_disable_patch, patch, NULL);
return ret;
}
#endif /* ifdef CONFIG_LIVEPATCH_FTRACE */
/**
* klp_disable_patch() - disables a registered patch
......@@ -347,6 +419,7 @@ int klp_disable_patch(struct klp_patch *patch)
}
EXPORT_SYMBOL_GPL(klp_disable_patch);
#ifdef CONFIG_LIVEPATCH_FTRACE
static int __klp_enable_patch(struct klp_patch *patch)
{
struct klp_object *obj;
......@@ -358,10 +431,12 @@ static int __klp_enable_patch(struct klp_patch *patch)
if (WARN_ON(patch->enabled))
return -EINVAL;
#ifdef CONFIG_LIVEPATCH_STACK
/* enforce stacking: only the first disabled patch can be enabled */
if (patch->list.prev != &klp_patches &&
!list_prev_entry(patch, list)->enabled)
return -EBUSY;
#endif
/*
* A reference is taken on the patch module to prevent it from being
......@@ -413,6 +488,121 @@ static int __klp_enable_patch(struct klp_patch *patch)
klp_cancel_transition();
return ret;
}
#else /* ifdef CONFIG_LIVEPATCH_WO_FTRACE */
/*
* This function is called from stop_machine() context.
*/
static int enable_patch(struct klp_patch *patch)
{
struct klp_object *obj;
int ret;
pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
if (!try_module_get(patch->mod))
return -ENODEV;
patch->enabled = true;
pr_notice("enabling patch '%s'\n", patch->mod->name);
klp_for_each_object(patch, obj) {
if (!klp_is_object_loaded(obj))
continue;
ret = klp_pre_patch_callback(obj);
if (ret) {
pr_warn("pre-patch callback failed for object '%s'\n",
klp_is_module(obj) ? obj->name : "vmlinux");
goto disable;
}
ret = klp_patch_object(obj);
if (ret) {
pr_warn("failed to patch object '%s'\n",
klp_is_module(obj) ? obj->name : "vmlinux");
goto disable;
}
}
return 0;
disable:
disable_patch(patch);
return ret;
}
struct patch_data {
struct klp_patch *patch;
atomic_t cpu_count;
};
int klp_try_enable_patch(void *data)
{
int ret = 0;
int flag = 0;
struct patch_data *pd = (struct patch_data *)data;
if (atomic_inc_return(&pd->cpu_count) == 1) {
struct klp_patch *patch = pd->patch;
ret = klp_check_calltrace(patch, 1);
if (ret) {
flag = 1;
atomic_inc(&pd->cpu_count);
return ret;
}
ret = enable_patch(patch);
if (ret) {
flag = 1;
atomic_inc(&pd->cpu_count);
return ret;
}
atomic_inc(&pd->cpu_count);
} else {
while (atomic_read(&pd->cpu_count) <= num_online_cpus())
cpu_relax();
if (!flag)
klp_smp_isb();
}
return ret;
}
static int __klp_enable_patch(struct klp_patch *patch)
{
int ret;
struct patch_data patch_data = {
.patch = patch,
.cpu_count = ATOMIC_INIT(0),
};
if (WARN_ON(patch->enabled))
return -EINVAL;
#ifdef CONFIG_LIVEPATCH_STACK
/* enforce stacking: only the first disabled patch can be enabled */
if (patch->list.prev != &klp_patches &&
!list_prev_entry(patch, list)->enabled) {
pr_err("only the first disabled patch can be enabled\n");
return -EBUSY;
}
#endif
ret = stop_machine(klp_try_enable_patch, &patch_data, cpu_online_mask);
if (ret)
return ret;
#ifndef CONFIG_LIVEPATCH_STACK
/* move the enabled patch to the list tail */
list_del(&patch->list);
list_add_tail(&patch->list, &klp_patches);
#endif
return 0;
}
#endif /* #ifdef CONFIG_LIVEPATCH_FTRACE */
/**
* klp_enable_patch() - enables a registered patch
......@@ -485,6 +675,7 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
goto err;
}
#ifdef CONFIG_LIVEPATCH_FTRACE
if (patch == klp_transition_patch) {
klp_reverse_transition();
} else if (enabled) {
......@@ -496,6 +687,17 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
if (ret)
goto err;
}
#else /* ifdef CONFIG_LIVEPATCH_WO_FTRACE */
if (enabled) {
ret = __klp_enable_patch(patch);
if (ret)
goto err;
} else {
ret = __klp_disable_patch(patch);
if (ret)
goto err;
}
#endif
mutex_unlock(&klp_mutex);
......@@ -515,6 +717,7 @@ static ssize_t enabled_show(struct kobject *kobj,
return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
}
#ifdef CONFIG_LIVEPATCH_FTRACE
static ssize_t transition_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
......@@ -582,19 +785,61 @@ static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
return count;
}
#endif /* #ifdef CONFIG_LIVEPATCH_FTRACE */
static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
#ifdef CONFIG_LIVEPATCH_FTRACE
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute signal_kobj_attr = __ATTR_WO(signal);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
#endif /* #ifdef CONFIG_LIVEPATCH_FTRACE */
static struct attribute *klp_patch_attrs[] = {
&enabled_kobj_attr.attr,
#ifdef CONFIG_LIVEPATCH_FTRACE
&transition_kobj_attr.attr,
&signal_kobj_attr.attr,
&force_kobj_attr.attr,
#endif /* #ifdef CONFIG_LIVEPATCH_FTRACE */
NULL
};
static int state_show(struct seq_file *m, void *v)
{
struct klp_patch *patch;
char *state;
int index = 0;
seq_printf(m, "%-5s\t%-26s\t%-8s\n", "Index", "Patch", "State");
seq_puts(m, "-----------------------------------------------\n");
mutex_lock(&klp_mutex);
list_for_each_entry(patch, &klp_patches, list) {
if (patch->enabled)
state = "enabled";
else
state = "disabled";
seq_printf(m, "%-5d\t%-26s\t%-8s\n", ++index,
patch->mod->name, state);
}
mutex_unlock(&klp_mutex);
seq_puts(m, "-----------------------------------------------\n");
return 0;
}
static int klp_state_open(struct inode *inode, struct file *filp)
{
return single_open(filp, state_show, NULL);
}
static const struct file_operations proc_klpstate_operations = {
.open = klp_state_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void klp_kobj_release_patch(struct kobject *kobj)
{
struct klp_patch *patch;
......@@ -640,6 +885,7 @@ static void klp_free_funcs_limited(struct klp_object *obj,
kobject_put(&func->kobj);
}
#ifdef CONFIG_LIVEPATCH_FTRACE
/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
......@@ -650,6 +896,7 @@ static void klp_free_object_loaded(struct klp_object *obj)
klp_for_each_func(obj, func)
func->old_addr = 0;
}
#endif /* #ifdef CONFIG_LIVEPATCH_FTRACE */
/*
* Free all objects' kobjects in the array up to some limit. When limit is
......@@ -661,6 +908,9 @@ static void klp_free_objects_limited(struct klp_patch *patch,
struct klp_object *obj;
for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
if (klp_is_module(obj))
module_put(obj->mod);
klp_free_funcs_limited(obj, NULL);
kobject_put(&obj->kobj);
}
......@@ -683,7 +933,9 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
INIT_LIST_HEAD(&func->stack_node);
func->patched = false;
#ifdef CONFIG_LIVEPATCH_FTRACE
func->transition = false;
#endif
/* The format for the sysfs directory is <function,sympos> where sympos
* is the nth occurrence of this symbol in kallsyms for the patched
......@@ -760,18 +1012,20 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
obj->patched = false;
obj->mod = NULL;
klp_find_object_module(obj);
ret = klp_find_object_module(obj);
if (ret)
return ret;
name = klp_is_module(obj) ? obj->name : "vmlinux";
ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
&patch->kobj, "%s", name);
if (ret)
return ret;
goto put;
klp_for_each_func(obj, func) {
ret = klp_init_func(obj, func);
if (ret)
goto free;
goto out;
}
if (klp_is_object_loaded(obj)) {
......@@ -784,7 +1038,10 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
free:
klp_free_funcs_limited(obj, func);
out:
kobject_put(&obj->kobj);
put:
module_put(obj->mod);
return ret;
}
......@@ -798,6 +1055,11 @@ static int klp_init_patch(struct klp_patch *patch)
mutex_lock(&klp_mutex);
if (klp_is_patch_registered(patch)) {
mutex_unlock(&klp_mutex);
return -EINVAL;
}
patch->enabled = false;
init_completion(&patch->finish);
......@@ -904,6 +1166,7 @@ int klp_register_patch(struct klp_patch *patch)
}
EXPORT_SYMBOL_GPL(klp_register_patch);
#ifdef CONFIG_LIVEPATCH_FTRACE
/*
* Remove parts of patches that touch a given kernel module. The list of
* patches processed might be limited. When limit is NULL, all patches
......@@ -1045,10 +1308,12 @@ void klp_module_going(struct module *mod)
mutex_unlock(&klp_mutex);
}
#endif /* ifdef CONFIG_LIVEPATCH_FTRACE */
static int __init klp_init(void)
{
int ret;
struct proc_dir_entry *root_klp_dir, *res;
ret = klp_check_compiler_support();
if (ret) {
......@@ -1056,11 +1321,25 @@ static int __init klp_init(void)
return -EINVAL;
}
root_klp_dir = proc_mkdir("livepatch", NULL);
if (!root_klp_dir)
goto error_out;
res = proc_create("livepatch/state", 0, NULL,
&proc_klpstate_operations);
if (!res)
goto error_remove;
klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
if (!klp_root_kobj)
return -ENOMEM;
goto error_remove;
return 0;
error_remove:
remove_proc_entry("livepatch", NULL);
error_out:
return -ENOMEM;
}
module_init(klp_init);
......@@ -32,6 +32,7 @@
#include "patch.h"
#include "transition.h"
#ifdef CONFIG_LIVEPATCH_FTRACE
static LIST_HEAD(klp_ops);
struct klp_ops *klp_find_ops(unsigned long old_addr)
......@@ -236,6 +237,38 @@ static int klp_patch_func(struct klp_func *func)
return ret;
}
#else /* #ifdef CONFIG_LIVEPATCH_WO_FTRACE */
static void klp_unpatch_func(struct klp_func *func)
{
if (WARN_ON(!func->patched))
return;
if (WARN_ON(!func->old_addr))
return;
arch_klp_unpatch_func(func);
func->patched = false;
}
static inline int klp_patch_func(struct klp_func *func)
{
int ret = 0;
if (WARN_ON(!func->old_addr))
return -EINVAL;
if (WARN_ON(func->patched))
return -EINVAL;
ret = arch_klp_patch_func(func);
if (!ret)
func->patched = true;
return ret;
}
#endif
void klp_unpatch_object(struct klp_object *obj)
{
struct klp_func *func;
......
......@@ -22,7 +22,12 @@
struct klp_ops {
struct list_head node;
struct list_head func_stack;
#ifdef CONFIG_LIVEPATCH_FTRACE
struct ftrace_ops fops;
#else /* CONFIG_LIVEPATCH_WO_FTRACE */
struct list_head func_list;
unsigned long old_addr;
#endif
};
struct klp_ops *klp_find_ops(unsigned long old_addr);
......
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-sample.o
ifeq ($(CONFIG_LIVEPATCH_FTRACE), y)
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-mod.o
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-fix1.o
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-fix2.o
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-callbacks-demo.o
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-callbacks-mod.o
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-callbacks-busymod.o
endif