Commit 3c33f5b9, authored by Josh Poimboeuf, committed by Jiri Kosina

livepatch: support for repatching a function

Add support for patching a function multiple times.  If multiple patches
affect a function, the function in the most recently enabled patch
"wins".  This enables a cumulative patch upgrade path, where each patch
is a superset of previous patches.
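
For illustration, a hypothetical enable/disable sequence for one patched
function under this scheme (module and function names are made up):

/*
 * Both patch modules replace the same kernel function, old_func():
 *
 *   enable patch1   ->  calls to old_func() run patch1's version
 *   enable patch2   ->  patch2's version now "wins"
 *   disable patch2  ->  calls fall back to patch1's version
 *   disable patch1  ->  old_func() runs unpatched again
 */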

This requires restructuring the data a little bit.  With the current
design, where each klp_func struct has its own ftrace_ops, we'd have to
unregister the old ops and then register the new ops, because
FTRACE_OPS_FL_IPMODIFY prevents us from having two ops registered for
the same function at the same time.  That would leave a regression
window where the function isn't patched at all (not good for a patch
upgrade path).
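
A minimal sketch of that window, assuming a hypothetical direct swap of the
old per-func ops (not code from this patch; register_ftrace_function() and
unregister_ftrace_function() are the real ftrace APIs, old_func and new_func
are made-up names):

/* ftrace rejects two FTRACE_OPS_FL_IPMODIFY ops on one function, so: */
unregister_ftrace_function(old_func->fops);  /* old patch torn down */
/* <-- regression window: the function runs unpatched here */
register_ftrace_function(new_func->fops);    /* new patch installed */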

This patch replaces the per-klp_func ftrace_ops with a global klp_ops
list, with one ftrace_ops per original function.  A single ftrace_ops is
shared between all klp_funcs which have the same old_addr.  This allows
the switch between function versions to happen instantaneously by
updating the klp_ops struct's func_stack list.  The winner is the
klp_func at the top of the func_stack (front of the list).
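
In code terms, condensed from the diff below, the shared structure and the
handler's "winner" selection look like this:

struct klp_ops {
        struct list_head node;          /* entry in the global klp_ops list */
        struct list_head func_stack;    /* stack of klp_funcs; top is active */
        struct ftrace_ops fops;         /* the one registered ops per old_addr */
};

/* in klp_ftrace_handler(): whichever klp_func is on top of the stack wins */
func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, stack_node);
klp_arch_set_pc(regs, (unsigned long)func->new_func);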

[ jkosina@suse.cz: turn WARN_ON() into WARN_ON_ONCE() in the ftrace handler
  to avoid a warning storm in pathological cases ]
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Reviewed-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Parent 83a90bb1
@@ -40,8 +40,8 @@ enum klp_state {
  * @old_addr:   a hint conveying at what address the old function
  *              can be found (optional, vmlinux patches only)
  * @kobj:       kobject for sysfs resources
- * @fops:       ftrace operations structure
  * @state:      tracks function-level patch application state
+ * @stack_node: list node for klp_ops func_stack list
  */
 struct klp_func {
         /* external */
@@ -59,8 +59,8 @@ struct klp_func {
         /* internal */
         struct kobject kobj;
-        struct ftrace_ops *fops;
         enum klp_state state;
+        struct list_head stack_node;
 };
 
 /**
...
@@ -29,17 +29,53 @@
 #include <linux/kallsyms.h>
 #include <linux/livepatch.h>
 
-/*
- * The klp_mutex protects the klp_patches list and state transitions of any
- * structure reachable from the patches list. References to any structure must
- * be obtained under mutex protection.
+/**
+ * struct klp_ops - structure for tracking registered ftrace ops structs
+ *
+ * A single ftrace_ops is shared between all enabled replacement functions
+ * (klp_func structs) which have the same old_addr.  This allows the switch
+ * between function versions to happen instantaneously by updating the klp_ops
+ * struct's func_stack list.  The winner is the klp_func at the top of the
+ * func_stack (front of the list).
+ *
+ * @node:       node for the global klp_ops list
+ * @func_stack: list head for the stack of klp_func's (active func is on top)
+ * @fops:       registered ftrace ops struct
  */
+struct klp_ops {
+        struct list_head node;
+        struct list_head func_stack;
+        struct ftrace_ops fops;
+};
+
+/*
+ * The klp_mutex protects the global lists and state transitions of any
+ * structure reachable from them.  References to any structure must be obtained
+ * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
+ * ensure it gets consistent data).
+ */
 static DEFINE_MUTEX(klp_mutex);
+
 static LIST_HEAD(klp_patches);
+static LIST_HEAD(klp_ops);
+
 static struct kobject *klp_root_kobj;
 
+static struct klp_ops *klp_find_ops(unsigned long old_addr)
+{
+        struct klp_ops *ops;
+        struct klp_func *func;
+
+        list_for_each_entry(ops, &klp_ops, node) {
+                func = list_first_entry(&ops->func_stack, struct klp_func,
+                                        stack_node);
+                if (func->old_addr == old_addr)
+                        return ops;
+        }
+
+        return NULL;
+}
+
 static bool klp_is_module(struct klp_object *obj)
 {
         return obj->name;
...
@@ -267,16 +303,28 @@ static int klp_write_object_relocations(struct module *pmod,
 
 static void notrace klp_ftrace_handler(unsigned long ip,
                                        unsigned long parent_ip,
-                                       struct ftrace_ops *ops,
+                                       struct ftrace_ops *fops,
                                        struct pt_regs *regs)
 {
-        struct klp_func *func = ops->private;
+        struct klp_ops *ops;
+        struct klp_func *func;
+
+        ops = container_of(fops, struct klp_ops, fops);
+
+        rcu_read_lock();
+        func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
+                                      stack_node);
+        rcu_read_unlock();
+
+        if (WARN_ON_ONCE(!func))
+                return;
 
         klp_arch_set_pc(regs, (unsigned long)func->new_func);
 }
 
 static int klp_disable_func(struct klp_func *func)
 {
+        struct klp_ops *ops;
         int ret;
 
         if (WARN_ON(func->state != KLP_ENABLED))
@@ -285,17 +333,29 @@ static int klp_disable_func(struct klp_func *func)
         if (WARN_ON(!func->old_addr))
                 return -EINVAL;
 
-        ret = unregister_ftrace_function(func->fops);
-        if (ret) {
-                pr_err("failed to unregister ftrace handler for function '%s' (%d)\n",
-                       func->old_name, ret);
-                return ret;
-        }
+        ops = klp_find_ops(func->old_addr);
+        if (WARN_ON(!ops))
+                return -EINVAL;
 
-        ret = ftrace_set_filter_ip(func->fops, func->old_addr, 1, 0);
-        if (ret)
-                pr_warn("function unregister succeeded but failed to clear the filter\n");
+        if (list_is_singular(&ops->func_stack)) {
+                ret = unregister_ftrace_function(&ops->fops);
+                if (ret) {
+                        pr_err("failed to unregister ftrace handler for function '%s' (%d)\n",
+                               func->old_name, ret);
+                        return ret;
+                }
+
+                ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
+                if (ret)
+                        pr_warn("function unregister succeeded but failed to clear the filter\n");
+
+                list_del_rcu(&func->stack_node);
+                list_del(&ops->node);
+                kfree(ops);
+        } else {
+                list_del_rcu(&func->stack_node);
+        }
 
         func->state = KLP_DISABLED;
 
         return 0;
@@ -303,6 +363,7 @@ static int klp_disable_func(struct klp_func *func)
 
 static int klp_enable_func(struct klp_func *func)
 {
+        struct klp_ops *ops;
         int ret;
 
         if (WARN_ON(!func->old_addr))
@@ -311,22 +372,50 @@ static int klp_enable_func(struct klp_func *func)
         if (WARN_ON(func->state != KLP_DISABLED))
                 return -EINVAL;
 
-        ret = ftrace_set_filter_ip(func->fops, func->old_addr, 0, 0);
-        if (ret) {
-                pr_err("failed to set ftrace filter for function '%s' (%d)\n",
-                       func->old_name, ret);
-                return ret;
-        }
+        ops = klp_find_ops(func->old_addr);
+        if (!ops) {
+                ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+                if (!ops)
+                        return -ENOMEM;
+
+                ops->fops.func = klp_ftrace_handler;
+                ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
+                                  FTRACE_OPS_FL_DYNAMIC |
+                                  FTRACE_OPS_FL_IPMODIFY;
+
+                list_add(&ops->node, &klp_ops);
+
+                INIT_LIST_HEAD(&ops->func_stack);
+                list_add_rcu(&func->stack_node, &ops->func_stack);
+
+                ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
+                if (ret) {
+                        pr_err("failed to set ftrace filter for function '%s' (%d)\n",
+                               func->old_name, ret);
+                        goto err;
+                }
 
-        ret = register_ftrace_function(func->fops);
-        if (ret) {
-                pr_err("failed to register ftrace handler for function '%s' (%d)\n",
-                       func->old_name, ret);
-                ftrace_set_filter_ip(func->fops, func->old_addr, 1, 0);
+                ret = register_ftrace_function(&ops->fops);
+                if (ret) {
+                        pr_err("failed to register ftrace handler for function '%s' (%d)\n",
+                               func->old_name, ret);
+                        ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
+                        goto err;
+                }
         } else {
-                func->state = KLP_ENABLED;
+                list_add_rcu(&func->stack_node, &ops->func_stack);
         }
 
+        func->state = KLP_ENABLED;
+
+        return 0;
+
+err:
+        list_del_rcu(&func->stack_node);
+        list_del(&ops->node);
+        kfree(ops);
         return ret;
 }
@@ -582,10 +671,6 @@ static struct kobj_type klp_ktype_patch = {
 
 static void klp_kobj_release_func(struct kobject *kobj)
 {
-        struct klp_func *func;
-
-        func = container_of(kobj, struct klp_func, kobj);
-        kfree(func->fops);
 }
 
 static struct kobj_type klp_ktype_func = {
@@ -642,28 +727,11 @@ static void klp_free_patch(struct klp_patch *patch)
 
 static int klp_init_func(struct klp_object *obj, struct klp_func *func)
 {
-        struct ftrace_ops *ops;
-        int ret;
-
-        ops = kzalloc(sizeof(*ops), GFP_KERNEL);
-        if (!ops)
-                return -ENOMEM;
-
-        ops->private = func;
-        ops->func = klp_ftrace_handler;
-        ops->flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_DYNAMIC |
-                     FTRACE_OPS_FL_IPMODIFY;
-        func->fops = ops;
+        INIT_LIST_HEAD(&func->stack_node);
         func->state = KLP_DISABLED;
 
-        ret = kobject_init_and_add(&func->kobj, &klp_ktype_func,
-                                   obj->kobj, func->old_name);
-        if (ret) {
-                kfree(func->fops);
-                return ret;
-        }
-
-        return 0;
+        return kobject_init_and_add(&func->kobj, &klp_ktype_func,
+                                    obj->kobj, func->old_name);
 }
 
 /* parts of the initialization that is done only when the object is loaded */
...