Commit 1348c3cc authored by Cheng Jian, committed by Xie XiuQi

livepatch/core: allow implementation without ftrace

euler inclusion
category: feature
Bugzilla: 5507
CVE: N/A

----------------------------------------

Support livepatch without ftrace mode.

New configs for the WO_FTRACE mode:
	CONFIG_LIVEPATCH_WO_FTRACE=y
	CONFIG_LIVEPATCH_STACK=y

This implements livepatch without ftrace by direct jump: we
directly modify the first few instructions (usually one, but
four for long jumps under ARM64) of the old function into jump
instructions via stop_machine(), so that execution jumps to the
first address of the new function once the livepatch is enabled.

KERNEL/MODULE
call/bl A---------------old_A------------
                        | jump new_A----+--------|
                        |               |        |
                        |               |        |
                        -----------------        |
                                                 |
                                                 |
                                                 |
livepatch_module-------------                    |
|                           |                    |
|new_A <--------------------+--------------------|
|                           |
|                           |
|---------------------------|
| .plt                      |
| ......PLTS for livepatch  |
-----------------------------
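
For illustration only (not part of this patch): a user-space sketch of how
the jump described above might be encoded on AArch64. The addresses, the
x16 scratch register, and the four-word ldr+br+literal long-jump sequence
are assumptions for the example; the patch's real encoding lives in the
arch-specific code.

/*
 * Illustration only -- not from this patch.  Encode the AArch64 jump
 * the commit message describes: one "b" instruction when new_func is
 * within +/-128 MB of old_func, otherwise a four-word long jump
 * (ldr x16 from an inline literal, br x16, 64-bit literal).
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* "b imm26" encodes a signed 26-bit word offset: +/-128 MB. */
static int fits_short_branch(uint64_t from, uint64_t to)
{
        int64_t off = (int64_t)(to - from);

        return off >= -(1LL << 27) && off < (1LL << 27);
}

static void emit_jump(uint64_t old_func, uint64_t new_func)
{
        if (fits_short_branch(old_func, new_func)) {
                /* b new_func : 0x14000000 | imm26 (word offset) */
                uint32_t b = 0x14000000 |
                        ((uint32_t)((new_func - old_func) >> 2) & 0x03ffffff);

                printf("short: %08" PRIx32 "\n", b);
        } else {
                /*
                 * ldr x16, .+8   (0x58000050: load the literal below)
                 * br  x16        (0xd61f0200)
                 * .quad new_func (two data words, little-endian)
                 */
                printf("long:  58000050 d61f0200 %08" PRIx32 " %08" PRIx32 "\n",
                       (uint32_t)(new_func & 0xffffffff),
                       (uint32_t)(new_func >> 32));
        }
}

int main(void)
{
        emit_jump(0x1000, 0x2000);                /* reachable: short jump */
        emit_jump(0x1000, 0x1000 + (1ULL << 32)); /* out of range: long jump */
        return 0;
}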

Things we need to consider under different architectures:

1. the jump instruction
2. partial relocations in the new function required for livepatch
3. long jumps may be required if the jump target exceeds the reachable
   offset, both for livepatch relocation and for livepatch enable
   (see the sketch after this list)
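
The enable path added below (klp_try_enable_patch) runs on every CPU inside
stop_machine() and uses a leader/follower rendezvous: the first CPU to
arrive checks the call traces and applies the patch, the others spin on a
counter and then execute an instruction-sync barrier. A minimal user-space
analogue of that rendezvous, with POSIX threads standing in for CPUs
(hypothetical, for illustration only):

/* Illustration only: the leader/follower rendezvous pattern used by
 * klp_try_enable_patch(), modelled with threads instead of CPUs. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4                 /* stand-in for num_online_cpus() */

static atomic_int cpu_count;

static void *try_enable(void *unused)
{
        (void)unused;

        if (atomic_fetch_add(&cpu_count, 1) == 0) {
                /* Leader: do the patching while everyone else is held. */
                puts("leader: check calltrace, apply patch");
                /* The second increment pushes the counter past NCPUS,
                 * releasing the followers. */
                atomic_fetch_add(&cpu_count, 1);
        } else {
                /* Followers: spin until the leader is done. */
                while (atomic_load(&cpu_count) <= NCPUS)
                        ;               /* cpu_relax() in the kernel */
                /* The kernel issues klp_smp_isb() here so each CPU
                 * refetches the rewritten instructions. */
        }
        return NULL;
}

int main(void)
{
        pthread_t t[NCPUS];
        int i;

        for (i = 0; i < NCPUS; i++)
                pthread_create(&t[i], NULL, try_enable, NULL);
        for (i = 0; i < NCPUS; i++)
                pthread_join(t[i], NULL);
        return 0;
}

Note that in the patch the leader bumps the counter even on failure, so the
stopped CPUs are always released.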
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Reviewed-by: Li Bin <huawei.libin@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 1f6836e2
@@ -202,7 +202,7 @@ config PPC
        select HAVE_KPROBES_ON_FTRACE
        select HAVE_KRETPROBES
        select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
-       select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
+       select HAVE_LIVEPATCH_FTRACE if HAVE_DYNAMIC_FTRACE_WITH_REGS
        select HAVE_MEMBLOCK
        select HAVE_MEMBLOCK_NODE_MAP
        select HAVE_MOD_ARCH_SPECIFIC
......
@@ -151,7 +151,7 @@ config S390
        select HAVE_KPROBES
        select HAVE_KRETPROBES
        select HAVE_KVM
-       select HAVE_LIVEPATCH
+       select HAVE_LIVEPATCH_FTRACE
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
        select HAVE_MEMBLOCK
......
@@ -166,7 +166,7 @@ config X86
        select HAVE_FUNCTION_ERROR_INJECTION
        select HAVE_KRETPROBES
        select HAVE_KVM
-       select HAVE_LIVEPATCH if X86_64
+       select HAVE_LIVEPATCH_FTRACE if X86_64
        select HAVE_MEMBLOCK
        select HAVE_MEMBLOCK_NODE_MAP
        select HAVE_MIXED_BREAKPOINTS_REGS
......
@@ -82,7 +82,9 @@ struct klp_func {
        struct list_head stack_node;
        unsigned long old_size, new_size;
        bool patched;
+#ifdef CONFIG_LIVEPATCH_FTRACE
        bool transition;
+#endif
 };
 
 struct klp_object;
@@ -168,6 +170,7 @@ int klp_disable_patch(struct klp_patch *);
 void arch_klp_init_object_loaded(struct klp_patch *patch,
                                 struct klp_object *obj);
 
+#ifdef CONFIG_LIVEPATCH_FTRACE
 /* Called from the module loader during module coming/going states */
 int klp_module_coming(struct module *mod);
 void klp_module_going(struct module *mod);
@@ -201,8 +204,18 @@ void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
 void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor);
 void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
 
-#else /* !CONFIG_LIVEPATCH */
+#else /* !CONFIG_LIVEPATCH_FTRACE */
+static inline int klp_module_coming(struct module *mod) { return 0; }
+static inline void klp_module_going(struct module *mod) {}
+static inline bool klp_patch_pending(struct task_struct *task) { return false; }
+static inline void klp_update_patch_state(struct task_struct *task) {}
+static inline void klp_copy_process(struct task_struct *child) {}
+static inline bool klp_have_reliable_stack(void) { return true; }
+#endif /* CONFIG_LIVEPATCH_FTRACE */
 
+#else /* !CONFIG_LIVEPATCH */
 static inline int klp_module_coming(struct module *mod) { return 0; }
 static inline void klp_module_going(struct module *mod) {}
 static inline bool klp_patch_pending(struct task_struct *task) { return false; }
......
-config HAVE_LIVEPATCH
+config HAVE_LIVEPATCH_FTRACE
        bool
        help
-         Arch supports kernel live patching
+         Arch supports kernel live patching based on ftrace
+
+config HAVE_LIVEPATCH_WO_FTRACE
+       bool
+       help
+         Arch supports kernel live patching without ftrace
+
+if HAVE_LIVEPATCH_FTRACE || HAVE_LIVEPATCH_WO_FTRACE
+
+menu "Enable Livepatch"
 
 config LIVEPATCH
        bool "Kernel Live Patching"
-       depends on DYNAMIC_FTRACE_WITH_REGS
        depends on MODULES
        depends on SYSFS
        depends on KALLSYMS_ALL
-       depends on HAVE_LIVEPATCH
+       depends on HAVE_LIVEPATCH_FTRACE || HAVE_LIVEPATCH_WO_FTRACE
        depends on !TRIM_UNUSED_KSYMS
+       default n
        help
          Say Y here if you want to support kernel live patching.
          This option has no runtime impact until a kernel "patch"
          module uses the interface provided by this option to register
          a patch, causing calls to patched functions to be redirected
          to new function code contained in the patch module.
+
+choice
+       prompt "live patching method"
+       depends on LIVEPATCH
+       help
+         Live patching implementation method configuration.
+
+config LIVEPATCH_FTRACE
+       bool "based on ftrace"
+       depends on HAVE_LIVEPATCH_FTRACE
+       depends on DYNAMIC_FTRACE_WITH_REGS
+       help
+         Supports kernel live patching based on ftrace
+
+config LIVEPATCH_WO_FTRACE
+       bool "without ftrace"
+       depends on HAVE_LIVEPATCH_WO_FTRACE
+       depends on DEBUG_INFO
+       help
+         Supports kernel live patching without ftrace
+
+endchoice
+
+config LIVEPATCH_STACK
+       bool "Enforcing the patch stacking principle"
+       depends on LIVEPATCH_FTRACE || LIVEPATCH_WO_FTRACE
+       default y
+       help
+         Say N here if you want to remove the patch stacking principle.
+
+endmenu
+
+endif
 obj-$(CONFIG_LIVEPATCH) += livepatch.o
+obj-$(CONFIG_LIVEPATCH_FTRACE) += transition.o
+obj-$(CONFIG_LIVEPATCH_FTRACE) += shadow.o
 
-livepatch-objs := core.o patch.o shadow.o transition.o
+livepatch-objs := core.o patch.o
@@ -33,7 +33,14 @@
 #include <asm/cacheflush.h>
 #include "core.h"
 #include "patch.h"
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#ifdef CONFIG_LIVEPATCH_FTRACE
 #include "transition.h"
+#endif
+#ifdef CONFIG_LIVEPATCH_WO_FTRACE
+#include <linux/stop_machine.h>
+#endif
 
 /*
  * klp_mutex is a coarse lock which serializes access to klp data.  All
@@ -55,12 +62,12 @@ static bool klp_is_module(struct klp_object *obj)
 }
 
 /* sets obj->mod if object is not vmlinux and module is found */
-static void klp_find_object_module(struct klp_object *obj)
+static int klp_find_object_module(struct klp_object *obj)
 {
        struct module *mod;
 
        if (!klp_is_module(obj))
-               return;
+               return 0;
 
        mutex_lock(&module_mutex);
        /*
@@ -76,10 +83,24 @@ static void klp_find_object_module(struct klp_object *obj)
         * until mod->exit() finishes. This is especially important for
         * patches that modify semantic of the functions.
         */
+#ifdef CONFIG_LIVEPATCH_FTRACE
        if (mod && mod->klp_alive)
                obj->mod = mod;
+#else
+       if (!mod) {
+               pr_err("module '%s' not loaded\n", obj->name);
+               mutex_unlock(&module_mutex);
+               return -ENOPKG; /* the depended-on module is not loaded */
+       } else if (mod->state == MODULE_STATE_COMING || !try_module_get(mod)) {
+               mutex_unlock(&module_mutex);
+               return -EINVAL;
+       } else {
+               obj->mod = mod;
+       }
+#endif /* ifdef CONFIG_LIVEPATCH_FTRACE */
 
        mutex_unlock(&module_mutex);
+       return 0;
 }
 
 static bool klp_is_patch_registered(struct klp_patch *patch)
@@ -152,6 +173,8 @@ static int klp_find_object_symbol(const char *objname, const char *name,
        kallsyms_on_each_symbol(klp_find_callback, &args);
        mutex_unlock(&module_mutex);
 
+       cond_resched();
+
        /*
         * Ensure an address was found. If sympos is 0, ensure symbol is unique;
         * otherwise ensure the symbol position count matches sympos.
@@ -278,6 +301,7 @@ static int klp_write_object_relocations(struct module *pmod,
        return ret;
 }
 
+#ifdef CONFIG_LIVEPATCH_FTRACE
 static int __klp_disable_patch(struct klp_patch *patch)
 {
        struct klp_object *obj;
@@ -288,10 +312,12 @@ static int __klp_disable_patch(struct klp_patch *patch)
        if (klp_transition_patch)
                return -EBUSY;
 
+#ifdef CONFIG_LIVEPATCH_STACK
        /* enforce stacking: only the last enabled patch can be disabled */
        if (!list_is_last(&patch->list, &klp_patches) &&
            list_next_entry(patch, list)->enabled)
                return -EBUSY;
+#endif
 
        klp_init_transition(patch, KLP_UNPATCHED);
@@ -314,6 +340,52 @@ static int __klp_disable_patch(struct klp_patch *patch)
 
        return 0;
 }
+#else /* ifdef CONFIG_LIVEPATCH_WO_FTRACE */
+/*
+ * This function is called from stop_machine() context.
+ */
+static int disable_patch(struct klp_patch *patch)
+{
+       pr_notice("disabling patch '%s'\n", patch->mod->name);
+
+       klp_unpatch_objects(patch);
+       patch->enabled = false;
+       module_put(patch->mod);
+
+       return 0;
+}
+
+int klp_try_disable_patch(void *data)
+{
+       struct klp_patch *patch = data;
+       int ret = 0;
+
+       ret = klp_check_calltrace(patch, 0);
+       if (ret)
+               return ret;
+
+       ret = disable_patch(patch);
+
+       return ret;
+}
+
+static int __klp_disable_patch(struct klp_patch *patch)
+{
+       int ret;
+
+#ifdef CONFIG_LIVEPATCH_STACK
+       /* enforce stacking: only the last enabled patch can be disabled */
+       if (!list_is_last(&patch->list, &klp_patches) &&
+           list_next_entry(patch, list)->enabled) {
+               pr_err("only the last enabled patch can be disabled\n");
+               return -EBUSY;
+       }
+#endif
+
+       ret = stop_machine(klp_try_disable_patch, patch, NULL);
+
+       return ret;
+}
+#endif /* ifdef CONFIG_LIVEPATCH_FTRACE */
 
 /**
  * klp_disable_patch() - disables a registered patch
@@ -347,6 +419,7 @@ int klp_disable_patch(struct klp_patch *patch)
 }
 EXPORT_SYMBOL_GPL(klp_disable_patch);
 
+#ifdef CONFIG_LIVEPATCH_FTRACE
 static int __klp_enable_patch(struct klp_patch *patch)
 {
        struct klp_object *obj;
@@ -358,10 +431,12 @@ static int __klp_enable_patch(struct klp_patch *patch)
        if (WARN_ON(patch->enabled))
                return -EINVAL;
 
+#ifdef CONFIG_LIVEPATCH_STACK
        /* enforce stacking: only the first disabled patch can be enabled */
        if (patch->list.prev != &klp_patches &&
            !list_prev_entry(patch, list)->enabled)
                return -EBUSY;
+#endif
 
        /*
         * A reference is taken on the patch module to prevent it from being
@@ -413,6 +488,121 @@ static int __klp_enable_patch(struct klp_patch *patch)
        klp_cancel_transition();
        return ret;
 }
+#else /* ifdef CONFIG_LIVEPATCH_WO_FTRACE */
+/*
+ * This function is called from stop_machine() context.
+ */
+static int enable_patch(struct klp_patch *patch)
+{
+       struct klp_object *obj;
+       int ret;
+
+       pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
+       add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
+
+       if (!try_module_get(patch->mod))
+               return -ENODEV;
+
+       patch->enabled = true;
+
+       pr_notice("enabling patch '%s'\n", patch->mod->name);
+
+       klp_for_each_object(patch, obj) {
+               if (!klp_is_object_loaded(obj))
+                       continue;
+
+               ret = klp_pre_patch_callback(obj);
+               if (ret) {
+                       pr_warn("pre-patch callback failed for object '%s'\n",
+                               klp_is_module(obj) ? obj->name : "vmlinux");
+                       goto disable;
+               }
+
+               ret = klp_patch_object(obj);
+               if (ret) {
+                       pr_warn("failed to patch object '%s'\n",
+                               klp_is_module(obj) ? obj->name : "vmlinux");
+                       goto disable;
+               }
+       }
+
+       return 0;
+
+disable:
+       disable_patch(patch);
+       return ret;
+}
+
+struct patch_data {
+       struct klp_patch        *patch;
+       atomic_t                cpu_count;
+};
+
+int klp_try_enable_patch(void *data)
+{
+       int ret = 0;
+       int flag = 0;
+       struct patch_data *pd = (struct patch_data *)data;
+
+       if (atomic_inc_return(&pd->cpu_count) == 1) {
+               struct klp_patch *patch = pd->patch;
+
+               ret = klp_check_calltrace(patch, 1);
+               if (ret) {
+                       flag = 1;
+                       atomic_inc(&pd->cpu_count);
+                       return ret;
+               }
+
+               ret = enable_patch(patch);
+               if (ret) {
+                       flag = 1;
+                       atomic_inc(&pd->cpu_count);
+                       return ret;
+               }
+
+               atomic_inc(&pd->cpu_count);
+       } else {
+               while (atomic_read(&pd->cpu_count) <= num_online_cpus())
+                       cpu_relax();
+
+               if (!flag)
+                       klp_smp_isb();
+       }
+
+       return ret;
+}
+
+static int __klp_enable_patch(struct klp_patch *patch)
+{
+       int ret;
+       struct patch_data patch_data = {
+               .patch = patch,
+               .cpu_count = ATOMIC_INIT(0),
+       };
+
+       if (WARN_ON(patch->enabled))
+               return -EINVAL;
+
+#ifdef CONFIG_LIVEPATCH_STACK
+       /* enforce stacking: only the first disabled patch can be enabled */
+       if (patch->list.prev != &klp_patches &&
+           !list_prev_entry(patch, list)->enabled) {
+               pr_err("only the first disabled patch can be enabled\n");
+               return -EBUSY;
+       }
+#endif
+
+       ret = stop_machine(klp_try_enable_patch, &patch_data, cpu_online_mask);
+       if (ret)
+               return ret;
+
+#ifndef CONFIG_LIVEPATCH_STACK
+       /* move the enabled patch to the list tail */
+       list_del(&patch->list);
+       list_add_tail(&patch->list, &klp_patches);
+#endif
+
+       return 0;
+}
+#endif /* #ifdef CONFIG_LIVEPATCH_FTRACE */
 
 /**
  * klp_enable_patch() - enables a registered patch
@@ -485,6 +675,7 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
                goto err;
        }
 
+#ifdef CONFIG_LIVEPATCH_FTRACE
        if (patch == klp_transition_patch) {
                klp_reverse_transition();
        } else if (enabled) {
@@ -496,6 +687,17 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
                if (ret)
                        goto err;
        }
+#else /* ifdef CONFIG_LIVEPATCH_WO_FTRACE */
+       if (enabled) {
+               ret = __klp_enable_patch(patch);
+               if (ret)
+                       goto err;
+       } else {
+               ret = __klp_disable_patch(patch);
+               if (ret)
+                       goto err;
+       }
+#endif
 
        mutex_unlock(&klp_mutex);
@@ -515,6 +717,7 @@ static ssize_t enabled_show(struct kobject *kobj,
        return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
 }
 
+#ifdef CONFIG_LIVEPATCH_FTRACE
 static ssize_t transition_show(struct kobject *kobj,
                               struct kobj_attribute *attr, char *buf)
 {
@@ -582,19 +785,61 @@ static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
 
        return count;
 }
+#endif /* #ifdef CONFIG_LIVEPATCH_FTRACE */
 
 static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
+#ifdef CONFIG_LIVEPATCH_FTRACE
 static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
 static struct kobj_attribute signal_kobj_attr = __ATTR_WO(signal);
 static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
+#endif /* #ifdef CONFIG_LIVEPATCH_FTRACE */
 static struct attribute *klp_patch_attrs[] = {
        &enabled_kobj_attr.attr,
+#ifdef CONFIG_LIVEPATCH_FTRACE
        &transition_kobj_attr.attr,
        &signal_kobj_attr.attr,
        &force_kobj_attr.attr,
+#endif /* #ifdef CONFIG_LIVEPATCH_FTRACE */
        NULL
 };
 
+static int state_show(struct seq_file *m, void *v)
+{
+       struct klp_patch *patch;
+       char *state;
+       int index = 0;
+
+       seq_printf(m, "%-5s\t%-26s\t%-8s\n", "Index", "Patch", "State");
+       seq_puts(m, "-----------------------------------------------\n");
+       mutex_lock(&klp_mutex);
+       list_for_each_entry(patch, &klp_patches, list) {
+               if (patch->enabled)
+                       state = "enabled";
+               else
+                       state = "disabled";
+
+               seq_printf(m, "%-5d\t%-26s\t%-8s\n", ++index,
+                          patch->mod->name, state);
+       }
+       mutex_unlock(&klp_mutex);
+       seq_puts(m, "-----------------------------------------------\n");
+
+       return 0;
+}
+
+static int klp_state_open(struct inode *inode, struct file *filp)
+{
+       return single_open(filp, state_show, NULL);
+}
+
+static const struct file_operations proc_klpstate_operations = {
+       .open           = klp_state_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
 
 static void klp_kobj_release_patch(struct kobject *kobj)
 {
        struct klp_patch *patch;
@@ -640,6 +885,7 @@ static void klp_free_funcs_limited(struct klp_object *obj,
                kobject_put(&func->kobj);
 }
 
+#ifdef CONFIG_LIVEPATCH_FTRACE
 /* Clean up when a patched object is unloaded */
 static void klp_free_object_loaded(struct klp_object *obj)
 {
@@ -650,6 +896,7 @@ static void klp_free_object_loaded(struct klp_object *obj)
        klp_for_each_func(obj, func)
                func->old_addr = 0;
 }
+#endif /* #ifdef CONFIG_LIVEPATCH_FTRACE */
 
 /*
  * Free all objects' kobjects in the array up to some limit. When limit is
@@ -661,6 +908,9 @@ static void klp_free_objects_limited(struct klp_patch *patch,
        struct klp_object *obj;
 
        for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
+               if (klp_is_module(obj))
+                       module_put(obj->mod);
+
                klp_free_funcs_limited(obj, NULL);
                kobject_put(&obj->kobj);
        }
@@ -683,7 +933,9 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
 
        INIT_LIST_HEAD(&func->stack_node);
        func->patched = false;
+#ifdef CONFIG_LIVEPATCH_FTRACE
        func->transition = false;
+#endif
 
        /* The format for the sysfs directory is <function,sympos> where sympos
         * is the nth occurrence of this symbol in kallsyms for the patched
@@ -760,18 +1012,20 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
        obj->patched = false;
        obj->mod = NULL;
 
-       klp_find_object_module(obj);
+       ret = klp_find_object_module(obj);
+       if (ret)
+               return ret;
 
        name = klp_is_module(obj) ? obj->name : "vmlinux";
        ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
                                   &patch->kobj, "%s", name);
        if (ret)
-               return ret;
+               goto put;
 
        klp_for_each_func(obj, func) {
                ret = klp_init_func(obj, func);
                if (ret)
-                       goto free;
+                       goto out;
        }
 
        if (klp_is_object_loaded(obj)) {
@@ -784,7 +1038,10 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
 
 free:
        klp_free_funcs_limited(obj, func);
+out:
        kobject_put(&obj->kobj);
+put:
+       module_put(obj->mod);
        return ret;
 }
@@ -798,6 +1055,11 @@ static int klp_init_patch(struct klp_patch *patch)
 
        mutex_lock(&klp_mutex);
 
+       if (klp_is_patch_registered(patch)) {
+               mutex_unlock(&klp_mutex);
+               return -EINVAL;
+       }
+
        patch->enabled = false;
        init_completion(&patch->finish);
@@ -904,6 +1166,7 @@ int klp_register_patch(struct klp_patch *patch)
 }
 EXPORT_SYMBOL_GPL(klp_register_patch);
 
+#ifdef CONFIG_LIVEPATCH_FTRACE
 /*
  * Remove parts of patches that touch a given kernel module. The list of
  * patches processed might be limited. When limit is NULL, all patches
@@ -1045,10 +1308,12 @@ void klp_module_going(struct module *mod)
 
        mutex_unlock(&klp_mutex);
 }
+#endif /* ifdef CONFIG_LIVEPATCH_FTRACE */
 
 static int __init klp_init(void)
 {
        int ret;
+       struct proc_dir_entry *root_klp_dir, *res;
 
        ret = klp_check_compiler_support();
        if (ret) {
@@ -1056,11 +1321,25 @@ static int __init klp_init(void)
                return -EINVAL;
        }
 
+       root_klp_dir = proc_mkdir("livepatch", NULL);
+       if (!root_klp_dir)
+               goto error_out;
+
+       res = proc_create("livepatch/state", 0, NULL,
+                         &proc_klpstate_operations);
+       if (!res)
+               goto error_remove;
+
        klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
        if (!klp_root_kobj)
-               return -ENOMEM;
+               goto error_remove;
 
        return 0;
+
+error_remove:
+       remove_proc_entry("livepatch", NULL);
+error_out:
+       return -ENOMEM;
 }
 
 module_init(klp_init);
@@ -32,6 +32,7 @@
 #include "patch.h"
 #include "transition.h"
 
+#ifdef CONFIG_LIVEPATCH_FTRACE
 static LIST_HEAD(klp_ops);
 
 struct klp_ops *klp_find_ops(unsigned long old_addr)
@@ -236,6 +237,38 @@ static int klp_patch_func(struct klp_func *func)
 
        return ret;
 }
+#else /* #ifdef CONFIG_LIVEPATCH_WO_FTRACE */
+static void klp_unpatch_func(struct klp_func *func)
+{
+       if (WARN_ON(!func->patched))
+               return;
+       if (WARN_ON(!func->old_addr))
+               return;
+
+       arch_klp_unpatch_func(func);
+
+       func->patched = false;
+}
+
+static inline int klp_patch_func(struct klp_func *func)
+{
+       int ret = 0;
+
+       if (WARN_ON(!func->old_addr))
+               return -EINVAL;
+
+       if (WARN_ON(func->patched))
+               return -EINVAL;
+
+       ret = arch_klp_patch_func(func);
+       if (!ret)
+               func->patched = true;
+
+       return ret;
+}
+#endif
 
 void klp_unpatch_object(struct klp_object *obj)
 {
        struct klp_func *func;
......
@@ -22,7 +22,12 @@
 struct klp_ops {
        struct list_head node;
        struct list_head func_stack;
+#ifdef CONFIG_LIVEPATCH_FTRACE
        struct ftrace_ops fops;
+#else /* CONFIG_LIVEPATCH_WO_FTRACE */
+       struct list_head func_list;
+       unsigned long old_addr;
+#endif
 };
 
 struct klp_ops *klp_find_ops(unsigned long old_addr);
......
 obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-sample.o
+ifeq ($(CONFIG_LIVEPATCH_FTRACE), y)
 obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-mod.o
 obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-fix1.o
 obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-fix2.o
 obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-callbacks-demo.o
 obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-callbacks-mod.o
 obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-callbacks-busymod.o
+endif
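
For reference, the new /proc interface added in core.c would print a table
like the following (derived from the state_show() format strings above; the
patch module name is hypothetical):

$ cat /proc/livepatch/state
Index	Patch                     	State
-----------------------------------------------
1    	livepatch_sample          	enabled
-----------------------------------------------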