Commit b25272c0 authored by Li Huafei, committed by Zheng Zengkai

livepatch: Use breakpoint exception to optimize enabling livepatch

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5CJ7X

--------------------------------

Commit 86e35fae ("livepatch: checks only if the replaced
instruction is on the stack") optimized stack checking. However, for
extremely hot functions, the replaced instruction may still be on the
stack, so there is room for further optimization.

By inserting a breakpoint exception instruction at the entry of the
patched old function, we can divert calls from the old function to the
new function. In this way, during the stack check, only tasks that
entered the old function before the breakpoint was inserted need to be
considered, which increases the probability of the check passing.
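
To illustrate the idea, here is a minimal sketch of how an architecture's
breakpoint handler could perform the diversion. The handler itself is
arch-specific and not part of this patch; only klp_get_brk_func() below is.
The handler name and the register accessors are illustrative assumptions.

#include <linux/livepatch.h>
#include <linux/ptrace.h>

/*
 * Hypothetical arch-side glue (illustration only): if the breakpoint that
 * fired belongs to livepatch, resume execution in the new function.
 */
static int klp_brk_handler(struct pt_regs *regs)
{
	void *new_func = klp_get_brk_func((void *)instruction_pointer(regs));

	if (!new_func)
		return 0;	/* not a livepatch breakpoint, keep searching */

	/* Divert the call: continue execution at the new function. */
	instruction_pointer_set(regs, (unsigned long)new_func);
	return 1;
}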

If the stack check fails, we sleep for a while and try again, giving
tasks that are still inside the old function a chance to leave the
instruction replacement area.

We first enable the patch using the normal process, that is, without
inserting breakpoints. If this first attempt fails and the force flag
KLP_STACK_OPTIMIZE is set for all functions of the patch, we then fall
back to the breakpoint exception optimization.
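
Condensed, the resulting enable flow looks roughly like the sketch below
(error handling, rollback selection and the code-modify prepare/post hooks
are trimmed; the wrapper function name is invented for illustration, while
the called helpers are the ones added by this patch):

/* Illustrative condensation of __klp_enable_patch()/klp_breakpoint_optimize(). */
static int klp_enable_with_breakpoints(struct klp_patch *patch,
				       struct patch_data *pd)
{
	int i, ret;

	ret = klp_add_breakpoint(patch);	/* divert new callers right away */
	if (ret)
		return ret;

	for (i = 0; i < KLP_RETRY_COUNT; i++) {
		ret = stop_machine(klp_try_enable_patch, pd, cpu_online_mask);
		if (ret != -EAGAIN)
			break;			/* success, or a hard error */
		msleep(KLP_RETRY_INTERVAL);	/* let in-flight callers drain */
	}

	/* Clean up; on failure also restore the original old-function entry. */
	klp_breakpoint_post_process(patch, !!ret);
	return ret;
}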
Signed-off-by: Li Huafei <lihuafei1@huawei.com>
Reviewed-by: Xu Kuohai <xukuohai@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 37cf995a
@@ -137,7 +137,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
     for (obj = patch->objs; obj->funcs; obj++) {
         for (func = obj->funcs; func->old_name; func++) {
             if (enable) {
-                if (func->force == KLP_ENFORCEMENT)
+                if (func->patched || func->force == KLP_ENFORCEMENT)
                     continue;
                 /*
                  * When enable, checking the currently
...
@@ -131,7 +131,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
     for (obj = patch->objs; obj->funcs; obj++) {
         for (func = obj->funcs; func->old_name; func++) {
             if (enable) {
-                if (func->force == KLP_ENFORCEMENT)
+                if (func->patched || func->force == KLP_ENFORCEMENT)
                     continue;
                 /*
                  * When enable, checking the currently
...
@@ -134,7 +134,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
     for (obj = patch->objs; obj->funcs; obj++) {
         for (func = obj->funcs; func->old_name; func++) {
             if (enable) {
-                if (func->force == KLP_ENFORCEMENT)
+                if (func->patched || func->force == KLP_ENFORCEMENT)
                     continue;
                 /*
                  * When enable, checking the currently
...
@@ -143,7 +143,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
             /* Check func address in stack */
             if (enable) {
-                if (func->force == KLP_ENFORCEMENT)
+                if (func->patched || func->force == KLP_ENFORCEMENT)
                     continue;
                 /*
                  * When enable, checking the currently
...
@@ -126,7 +126,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
             /* Check func address in stack */
             if (enable) {
-                if (func->force == KLP_ENFORCEMENT)
+                if (func->patched || func->force == KLP_ENFORCEMENT)
                     continue;
                 /*
                  * When enable, checking the currently
...
@@ -229,19 +229,29 @@ struct klp_func_node {
     struct list_head func_stack;
     void *old_func;
     struct arch_klp_data arch_data;
+    /*
+     * Used in the breakpoint exception handling functions.
+     * If 'brk_func' is NULL, no breakpoint is inserted into the entry of
+     * the old function.
+     * If it is not NULL, its value is the new function to jump to when
+     * the breakpoint exception is triggered.
+     */
+    void *brk_func;
 };

 struct klp_func_node *klp_find_func_node(const void *old_func);
 void klp_add_func_node(struct klp_func_node *func_node);
 void klp_del_func_node(struct klp_func_node *func_node);
+void *klp_get_brk_func(void *addr);

 static inline
 int klp_compare_address(unsigned long pc, unsigned long func_addr,
             const char *func_name, unsigned long check_size)
 {
     if (pc >= func_addr && pc < func_addr + check_size) {
-        pr_err("func %s is in use!\n", func_name);
-        return -EBUSY;
+        pr_warn("func %s is in use!\n", func_name);
+        /* Return -EAGAIN for next retry */
+        return -EAGAIN;
     }
     return 0;
 }
...
@@ -31,6 +31,7 @@
 #include "state.h"
 #include "transition.h"
 #elif defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY)
+#include <linux/delay.h>
 #include <linux/stop_machine.h>
 #endif
@@ -57,6 +58,7 @@ static struct kobject *klp_root_kobj;
 struct patch_data {
     struct klp_patch *patch;
     atomic_t cpu_count;
+    bool rollback;
 };
 #endif
@@ -1300,6 +1302,37 @@ void klp_del_func_node(struct klp_func_node *func_node)
     list_del_rcu(&func_node->node);
 }

+/*
+ * Called from the breakpoint exception handler function.
+ */
+void *klp_get_brk_func(void *addr)
+{
+    struct klp_func_node *func_node;
+    void *brk_func = NULL;
+
+    if (!addr)
+        return NULL;
+
+    rcu_read_lock();
+
+    func_node = klp_find_func_node(addr);
+    if (!func_node)
+        goto unlock;
+
+    /*
+     * Corresponds to smp_wmb() in {add, remove}_breakpoint(). If the
+     * current breakpoint exception belongs to us, we have observed the
+     * breakpoint instruction, so brk_func must be observed.
+     */
+    smp_rmb();
+
+    brk_func = func_node->brk_func;
+
+unlock:
+    rcu_read_unlock();
+    return brk_func;
+}
+
 /*
  * This function is called from stop_machine() context.
  */
@@ -1370,6 +1403,25 @@ long __weak arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func)
     return -ENOSYS;
 }

+int __weak arch_klp_check_breakpoint(struct arch_klp_data *arch_data, void *old_func)
+{
+    return 0;
+}
+
+int __weak arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func)
+{
+    return -ENOTSUPP;
+}
+
+void __weak arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func)
+{
+}
+
+void __weak arch_klp_set_brk_func(struct klp_func_node *func_node, void *new_func)
+{
+    func_node->brk_func = new_func;
+}
+
 static struct klp_func_node *func_node_alloc(struct klp_func *func)
 {
     long ret;
@@ -1444,6 +1496,110 @@ static int klp_mem_prepare(struct klp_patch *patch)
     return 0;
 }

+static void remove_breakpoint(struct klp_func *func, bool restore)
+{
+    struct klp_func_node *func_node = klp_find_func_node(func->old_func);
+    struct arch_klp_data *arch_data = &func_node->arch_data;
+
+    if (!func_node->brk_func)
+        return;
+
+    if (restore)
+        arch_klp_remove_breakpoint(arch_data, func->old_func);
+
+    /* Wait for all breakpoint exception handler functions to exit. */
+    synchronize_rcu();
+
+    /* 'brk_func' cannot be set to NULL before the breakpoint is removed. */
+    smp_wmb();
+
+    arch_klp_set_brk_func(func_node, NULL);
+}
+
+static void __klp_breakpoint_post_process(struct klp_patch *patch, bool restore)
+{
+    struct klp_object *obj;
+    struct klp_func *func;
+
+    klp_for_each_object(patch, obj) {
+        klp_for_each_func(obj, func) {
+            remove_breakpoint(func, restore);
+        }
+    }
+}
+
+static int add_breakpoint(struct klp_func *func)
+{
+    struct klp_func_node *func_node = klp_find_func_node(func->old_func);
+    struct arch_klp_data *arch_data = &func_node->arch_data;
+    int ret;
+
+    if (WARN_ON_ONCE(func_node->brk_func))
+        return -EINVAL;
+
+    ret = arch_klp_check_breakpoint(arch_data, func->old_func);
+    if (ret)
+        return ret;
+
+    arch_klp_set_brk_func(func_node, func->new_func);
+
+    /*
+     * When entering an exception, we must see 'brk_func' or the kernel
+     * will not be able to handle the breakpoint exception we are about
+     * to insert.
+     */
+    smp_wmb();
+
+    ret = arch_klp_add_breakpoint(arch_data, func->old_func);
+    if (ret)
+        arch_klp_set_brk_func(func_node, NULL);
+
+    return ret;
+}
+
+static int klp_add_breakpoint(struct klp_patch *patch)
+{
+    struct klp_object *obj;
+    struct klp_func *func;
+    int ret;
+
+    /*
+     * Ensure that the patch module is not unloaded before the breakpoint
+     * is removed. Once the breakpoint is removed, the new function can no
+     * longer be reached through the breakpoint handler function.
+     */
+    if (!try_module_get(patch->mod))
+        return -ENODEV;
+
+    arch_klp_code_modify_prepare();
+
+    klp_for_each_object(patch, obj) {
+        klp_for_each_func(obj, func) {
+            ret = add_breakpoint(func);
+            if (ret) {
+                __klp_breakpoint_post_process(patch, true);
+                arch_klp_code_modify_post_process();
+                module_put(patch->mod);
+                return ret;
+            }
+        }
+    }
+
+    arch_klp_code_modify_post_process();
+
+    return 0;
+}
+
+static void klp_breakpoint_post_process(struct klp_patch *patch, bool restore)
+{
+    arch_klp_code_modify_prepare();
+    __klp_breakpoint_post_process(patch, restore);
+    arch_klp_code_modify_post_process();
+    module_put(patch->mod);
+}
+
 static int __klp_disable_patch(struct klp_patch *patch)
 {
     int ret;
@@ -1614,7 +1770,7 @@ EXPORT_SYMBOL_GPL(klp_enable_patch);
 /*
  * This function is called from stop_machine() context.
  */
-static int enable_patch(struct klp_patch *patch)
+static int enable_patch(struct klp_patch *patch, bool rollback)
 {
     struct klp_object *obj;
     int ret;
@@ -1622,19 +1778,21 @@ static int enable_patch(struct klp_patch *patch)
     pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
     add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);

-    if (!try_module_get(patch->mod))
-        return -ENODEV;
+    if (!patch->enabled) {
+        if (!try_module_get(patch->mod))
+            return -ENODEV;

-    patch->enabled = true;
+        patch->enabled = true;

-    pr_notice("enabling patch '%s'\n", patch->mod->name);
+        pr_notice("enabling patch '%s'\n", patch->mod->name);
+    }

     klp_for_each_object(patch, obj) {
         if (!klp_is_object_loaded(obj))
             continue;

-        ret = klp_patch_object(obj);
-        if (ret) {
+        ret = klp_patch_object(obj, rollback);
+        if (ret && klp_need_rollback(ret, rollback)) {
             pr_warn("failed to patch object '%s'\n",
                 klp_is_module(obj) ? obj->name : "vmlinux");
             goto disable;
@@ -1666,7 +1824,7 @@ int klp_try_enable_patch(void *data)
         atomic_inc(&pd->cpu_count);
         return ret;
     }
-    ret = enable_patch(patch);
+    ret = enable_patch(patch, pd->rollback);
     if (ret) {
         atomic_inc(&pd->cpu_count);
         return ret;
@@ -1682,12 +1840,89 @@ int klp_try_enable_patch(void *data)
     return ret;
 }

+/*
+ * When stop_machine() is used to enable the patch and enabling fails because
+ * the stack check fails, a certain number of retries are allowed. The maximum
+ * number of retries is KLP_RETRY_COUNT.
+ *
+ * Sleeps for KLP_RETRY_INTERVAL milliseconds before each retry to give tasks
+ * that failed the stack check a chance to leave the instruction replacement
+ * area.
+ */
+#define KLP_RETRY_COUNT 5
+#define KLP_RETRY_INTERVAL 100
+
+static bool klp_use_breakpoint(struct klp_patch *patch)
+{
+    struct klp_object *obj;
+    struct klp_func *func;
+
+    klp_for_each_object(patch, obj) {
+        klp_for_each_func(obj, func) {
+            if (func->force != KLP_STACK_OPTIMIZE)
+                return false;
+        }
+    }
+
+    return true;
+}
+
+static int klp_breakpoint_optimize(struct klp_patch *patch)
+{
+    int ret;
+    int i;
+    int cnt = 0;
+
+    ret = klp_add_breakpoint(patch);
+    if (ret) {
+        pr_err("failed to add breakpoints, ret=%d\n", ret);
+        return ret;
+    }
+
+    for (i = 0; i < KLP_RETRY_COUNT; i++) {
+        struct patch_data patch_data = {
+            .patch = patch,
+            .cpu_count = ATOMIC_INIT(0),
+            .rollback = false,
+        };
+
+        if (i == KLP_RETRY_COUNT - 1)
+            patch_data.rollback = true;
+
+        cnt++;
+
+        arch_klp_code_modify_prepare();
+        ret = stop_machine(klp_try_enable_patch, &patch_data,
+                   cpu_online_mask);
+        arch_klp_code_modify_post_process();
+        if (!ret || ret != -EAGAIN)
+            break;
+
+        pr_notice("try again in %d ms.\n", KLP_RETRY_INTERVAL);
+        msleep(KLP_RETRY_INTERVAL);
+    }
+    pr_notice("patching %s, tried %d times, ret=%d.\n",
+          ret ? "failed" : "success", cnt, ret);
+
+    /*
+     * If the patch is enabled successfully, the breakpoint instruction
+     * has been replaced with the jump instruction. However, if the patch
+     * fails to be enabled, we need to delete the previously inserted
+     * breakpoint to restore the instruction at the old function entry.
+     */
+    klp_breakpoint_post_process(patch, !!ret);
+
+    return ret;
+}
+
 static int __klp_enable_patch(struct klp_patch *patch)
 {
     int ret;
     struct patch_data patch_data = {
         .patch = patch,
         .cpu_count = ATOMIC_INIT(0),
+        .rollback = true,
     };

     if (WARN_ON(patch->enabled))
@@ -1705,14 +1940,26 @@ static int __klp_enable_patch(struct klp_patch *patch)
     ret = klp_mem_prepare(patch);
     if (ret)
         return ret;

     arch_klp_code_modify_prepare();
-    ret = stop_machine(klp_try_enable_patch, &patch_data, cpu_online_mask);
+    ret = stop_machine(klp_try_enable_patch, &patch_data,
+               cpu_online_mask);
     arch_klp_code_modify_post_process();
-    if (ret) {
-        klp_mem_recycle(patch);
-        return ret;
+    if (!ret)
+        goto move_patch_to_tail;
+    if (ret != -EAGAIN)
+        goto err_out;
+
+    if (!klp_use_breakpoint(patch)) {
+        pr_debug("breakpoint exception optimization is not used.\n");
+        goto err_out;
     }
+
+    ret = klp_breakpoint_optimize(patch);
+    if (ret)
+        goto err_out;
+
+move_patch_to_tail:
 #ifndef CONFIG_LIVEPATCH_STACK
     /* move the enabled patch to the list tail */
     list_del(&patch->list);
@@ -1720,6 +1967,10 @@ static int __klp_enable_patch(struct klp_patch *patch)
 #endif

     return 0;
+
+err_out:
+    klp_mem_recycle(patch);
+    return ret;
 }

 /**
...
@@ -57,4 +57,18 @@ static inline void klp_post_unpatch_callback(struct klp_object *obj)
     obj->callbacks.post_unpatch_enabled = false;
 }
 #endif /* CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY */
+
+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+/*
+ * In the enable_patch() process, we do not need to roll back the patch
+ * immediately if it fails to be enabled. This way, functions that have already
+ * been patched successfully do not need to be patched again during a retry.
+ * However, on the last retry (rollback == true), or if the failure is not a
+ * stack check failure (patch_err != -EAGAIN), an immediate rollback is required.
+ */
+static inline bool klp_need_rollback(int patch_err, bool rollback)
+{
+    return patch_err != -EAGAIN || rollback;
+}
+#endif /* CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY */
 #endif /* _LIVEPATCH_CORE_H */
@@ -269,10 +269,10 @@ static inline int klp_patch_func(struct klp_func *func)
 {
     int ret = 0;

+    if (func->patched)
+        return 0;
+
     if (WARN_ON(!func->old_func))
         return -EINVAL;

-    if (WARN_ON(func->patched))
-        return -EINVAL;
-
     if (WARN_ON(!func->func_node))
         return -EINVAL;
@@ -306,6 +306,27 @@ void klp_unpatch_object(struct klp_object *obj)
     __klp_unpatch_object(obj, false);
 }

+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+int klp_patch_object(struct klp_object *obj, bool rollback)
+{
+    struct klp_func *func;
+    int ret;
+
+    if (obj->patched)
+        return 0;
+
+    klp_for_each_func(obj, func) {
+        ret = klp_patch_func(func);
+        if (ret && klp_need_rollback(ret, rollback)) {
+            klp_unpatch_object(obj);
+            return ret;
+        }
+    }
+
+    obj->patched = true;
+    return 0;
+}
+#else
 int klp_patch_object(struct klp_object *obj)
 {
     struct klp_func *func;
@@ -325,6 +346,7 @@ int klp_patch_object(struct klp_object *obj)
     return 0;
 }
+#endif

 static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
 {
...
@@ -29,7 +29,11 @@ struct klp_ops {

 struct klp_ops *klp_find_ops(void *old_func);

+#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+int klp_patch_object(struct klp_object *obj, bool rollback);
+#else
 int klp_patch_object(struct klp_object *obj);
+#endif
 void klp_unpatch_object(struct klp_object *obj);
 void klp_unpatch_objects(struct klp_patch *patch);
 void klp_unpatch_objects_dynamic(struct klp_patch *patch);
...