From 386dd48a892aee64a36ed4e519c30235d2414bfe Mon Sep 17 00:00:00 2001 From: Cheng Jian Date: Mon, 28 Jan 2019 10:09:20 +0800 Subject: [PATCH] livepatch/core: split livepatch consistency euler inclusion category: feature Bugzilla: 5507 CVE: N/A ---------------------------------------- In the previous version we forced the association between livepatch wo_ftrace and stop_machine. This is unwise and obviously confusing. commit d83a7cb375ee ("livepatch: change to a per-task consistency model") introduced a PER-TASK consistency model. It's a hybrid of kGraft and kpatch: it uses kGraft's per-task consistency and syscall barrier switching combined with kpatch's stack trace switching. There are also a number of fallback options which make it quite flexible. So we split the livepatch consistency model into two models: [1] PER-TASK consistency model. per-task consistency and syscall barrier switching combined with kpatch's stack trace switching. [2] STOP-MACHINE consistency model. stop-machine consistency and kpatch's stack trace switching. 
Signed-off-by: Cheng Jian Reviewed-by: Li Bin Signed-off-by: Yang Yingliang --- include/linux/livepatch.h | 6 ++--- kernel/livepatch/Kconfig | 27 +++++++++++++++++++++ kernel/livepatch/Makefile | 4 ++-- kernel/livepatch/core.c | 45 +++++++++++++++++------------------ kernel/livepatch/transition.c | 7 ++++++ 5 files changed, 61 insertions(+), 28 deletions(-) diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index f6ec6086a15f..f8bff3713781 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -83,7 +83,7 @@ struct klp_func { struct list_head stack_node; unsigned long old_size, new_size; bool patched; -#ifdef CONFIG_LIVEPATCH_FTRACE +#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY bool transition; #endif }; @@ -181,7 +181,7 @@ int klp_disable_patch(struct klp_patch *); void arch_klp_init_object_loaded(struct klp_patch *patch, struct klp_object *obj); -#ifdef CONFIG_LIVEPATCH_FTRACE +#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY /* Called from the module loader during module coming/going states */ int klp_module_coming(struct module *mod); void klp_module_going(struct module *mod); @@ -215,7 +215,7 @@ void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor); void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor); -#else /* !CONFIG_LIVEPATCH_FTRACE */ +#else /* !CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY */ static inline int klp_module_coming(struct module *mod) { return 0; } static inline void klp_module_going(struct module *mod) {} diff --git a/kernel/livepatch/Kconfig b/kernel/livepatch/Kconfig index edcd736d2e96..123777f82d17 100644 --- a/kernel/livepatch/Kconfig +++ b/kernel/livepatch/Kconfig @@ -49,6 +49,33 @@ config LIVEPATCH_WO_FTRACE endchoice +choice + prompt "live patching consistency model" + depends on LIVEPATCH + default LIVEPATCH_PER_TASK_CONSISTENCY if LIVEPATCH_FTRACE + default LIVEPATCH_STOP_MACHINE_CONSISTENCY if LIVEPATCH_WO_FTRACE + 
help + Livepatch consistency model configuration. + +config LIVEPATCH_PER_TASK_CONSISTENCY + bool "per task consistency" + help + Use basic per-task consistency model + It's a hybrid of kGraft and kpatch: + uses kGraft's per-task consistency and syscall + barrier switching combined with kpatch's stack + trace switching. There are also a number of + fallback options which make it quite flexible. + +config LIVEPATCH_STOP_MACHINE_CONSISTENCY + bool "stop machine consistency" + help + Use stop machine consistency model + stop-machine consistency and kpatch's stack + trace checking. + +endchoice + config LIVEPATCH_STACK bool "Enforcing the patch stacking principle" depends on LIVEPATCH_FTRACE || LIVEPATCH_WO_FTRACE diff --git a/kernel/livepatch/Makefile b/kernel/livepatch/Makefile index 4d0137752c65..db7c1a0d4c1a 100644 --- a/kernel/livepatch/Makefile +++ b/kernel/livepatch/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_LIVEPATCH) += livepatch.o -obj-$(CONFIG_LIVEPATCH_FTRACE) += transition.o -obj-$(CONFIG_LIVEPATCH_FTRACE) += shadow.o +obj-$(CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY) += transition.o +obj-$(CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY) += shadow.o livepatch-objs := core.o patch.o diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index c163658091d4..dbf19c5b29b3 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -39,10 +39,9 @@ #include #endif -#ifdef CONFIG_LIVEPATCH_FTRACE +#if defined(CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY) #include "transition.h" -#endif -#ifdef CONFIG_LIVEPATCH_WO_FTRACE +#elif defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) #include #endif @@ -60,7 +59,7 @@ static LIST_HEAD(klp_patches); static struct kobject *klp_root_kobj; -#ifdef CONFIG_LIVEPATCH_WO_FTRACE +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY struct patch_data { struct klp_patch *patch; atomic_t cpu_count; @@ -346,7 +345,7 @@ static int klp_write_object_relocations(struct module *pmod, return ret; } -#ifdef CONFIG_LIVEPATCH_FTRACE +#ifdef 
CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY static int __klp_disable_patch(struct klp_patch *patch) { struct klp_object *obj; @@ -385,7 +384,7 @@ static int __klp_disable_patch(struct klp_patch *patch) return 0; } -#else /* ifdef CONFIG_LIVEPATCH_WO_FTRACE */ +#elif defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) /* * This function is called from stop_machine() context. */ @@ -462,7 +461,7 @@ static int __klp_disable_patch(struct klp_patch *patch) return ret; } -#endif /* ifdef CONFIG_LIVEPATCH_FTRACE */ +#endif /* if defined(CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY) */ /** @@ -497,7 +496,7 @@ int klp_disable_patch(struct klp_patch *patch) } EXPORT_SYMBOL_GPL(klp_disable_patch); -#ifdef CONFIG_LIVEPATCH_FTRACE +#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY static int __klp_enable_patch(struct klp_patch *patch) { struct klp_object *obj; @@ -566,7 +565,7 @@ static int __klp_enable_patch(struct klp_patch *patch) klp_cancel_transition(); return ret; } -#else /* ifdef CONFIG_LIVEPATCH_WO_FTRACE */ +#elif defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) /* * This function is called from stop_machine() context. 
*/ @@ -674,7 +673,7 @@ static int __klp_enable_patch(struct klp_patch *patch) return 0; } -#endif /* #ifdef CONFIG_LIVEPATCH_FTRACE */ +#endif /* #ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY */ /** * klp_enable_patch() - enables a registered patch @@ -747,7 +746,7 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, goto err; } -#ifdef CONFIG_LIVEPATCH_FTRACE +#if defined(CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY) if (patch == klp_transition_patch) { klp_reverse_transition(); } else if (enabled) { @@ -759,7 +758,7 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, if (ret) goto err; } -#else /* ifdef CONFIG_LIVEPATCH_WO_FTRACE */ +#elif defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) if (enabled) { ret = __klp_enable_patch(patch); if (ret) @@ -789,7 +788,7 @@ static ssize_t enabled_show(struct kobject *kobj, return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled); } -#ifdef CONFIG_LIVEPATCH_FTRACE +#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY static ssize_t transition_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -857,22 +856,22 @@ static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr, return count; } -#endif /* #ifdef CONFIG_LIVEPATCH_FTRACE */ +#endif /* #ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY */ static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled); -#ifdef CONFIG_LIVEPATCH_FTRACE +#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition); static struct kobj_attribute signal_kobj_attr = __ATTR_WO(signal); static struct kobj_attribute force_kobj_attr = __ATTR_WO(force); -#endif /* #ifdef CONFIG_LIVEPATCH_FTRACE */ +#endif /* #ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY */ static struct attribute *klp_patch_attrs[] = { &enabled_kobj_attr.attr, -#ifdef CONFIG_LIVEPATCH_FTRACE +#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY &transition_kobj_attr.attr, &signal_kobj_attr.attr, 
&force_kobj_attr.attr, -#endif /* #ifdef CONFIG_LIVEPATCH_FTRACE */ +#endif /* #ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY */ NULL }; @@ -957,7 +956,7 @@ static void klp_free_funcs_limited(struct klp_object *obj, kobject_put(&func->kobj); } -#ifdef CONFIG_LIVEPATCH_FTRACE +#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY /* Clean up when a patched object is unloaded */ static void klp_free_object_loaded(struct klp_object *obj) { @@ -968,7 +967,7 @@ static void klp_free_object_loaded(struct klp_object *obj) klp_for_each_func(obj, func) func->old_addr = 0; } -#endif /* #ifdef CONFIG_LIVEPATCH_FTRACE */ +#endif /* #ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY */ /* * Free all objects' kobjects in the array up to some limit. When limit is @@ -1022,7 +1021,7 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func) INIT_LIST_HEAD(&func->stack_node); func->patched = false; -#ifdef CONFIG_LIVEPATCH_FTRACE +#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY func->transition = false; #endif @@ -1297,7 +1296,7 @@ int klp_register_patch(struct klp_patch *patch) } EXPORT_SYMBOL_GPL(klp_register_patch); -#ifdef CONFIG_LIVEPATCH_FTRACE +#ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY /* * Remove parts of patches that touch a given kernel module. The list of * patches processed might be limited. 
When limit is NULL, all patches @@ -1439,7 +1438,7 @@ void klp_module_going(struct module *mod) mutex_unlock(&klp_mutex); } -#endif /* ifdef CONFIG_LIVEPATCH_FTRACE */ +#endif /* ifdef CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY */ static int __init klp_init(void) { diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c index 5bc349805e03..a0c2dd03e56b 100644 --- a/kernel/livepatch/transition.c +++ b/kernel/livepatch/transition.c @@ -206,7 +206,9 @@ static int klp_check_stack_func(struct klp_func *func, struct stack_trace *trace) { unsigned long func_addr, func_size, address; +#ifdef CONFIG_LIVEPATCH_FTRACE struct klp_ops *ops; +#endif int i; for (i = 0; i < trace->nr_entries; i++) { @@ -220,6 +222,7 @@ static int klp_check_stack_func(struct klp_func *func, func_addr = (unsigned long)func->new_func; func_size = func->new_size; } else { +#ifdef CONFIG_LIVEPATCH_FTRACE /* * Check for the to-be-patched function * (the previous func). @@ -238,6 +241,10 @@ static int klp_check_stack_func(struct klp_func *func, func_addr = (unsigned long)prev->new_func; func_size = prev->new_size; } +#else + func_addr = func->old_addr; + func_size = func->old_size; +#endif } if (address >= func_addr && address < func_addr + func_size) -- GitLab