From c8f9d7a3aae362482f81ba7c6819d410d66619ab Mon Sep 17 00:00:00 2001 From: Cheng Jian Date: Mon, 28 Jan 2019 10:09:19 +0800 Subject: [PATCH] livepatch/core: Restrict livepatch patched/unpatched when plant kprobe euler inclusion category: feature Bugzilla: 5507 CVE: N/A ---------------------------------------- livepatch wo_ftrace and kprobe are in conflict, because kprobe may modify the instructions anywhere in the function. So it's dangerous to patch/unpatch a function when there are some kprobes registered on it. Restrict these situations. we should hold kprobe_mutex in klp_check_patch_kprobed, but it's static and can't be exported, so protect klp_check_patch_probe in stop_machine to avoid registering kprobes when patching. we do nothing for (un)registering kprobes on the (old) function which has been patched, because some engineers need this. certainly, it will not lead to hangs, but it is not recommended. Signed-off-by: Cheng Jian Reviewed-by: Li Bin Signed-off-by: Yang Yingliang --- kernel/livepatch/Kconfig | 10 ++++++++ kernel/livepatch/core.c | 49 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+) diff --git a/kernel/livepatch/Kconfig b/kernel/livepatch/Kconfig index 9f19752a1be3..edcd736d2e96 100644 --- a/kernel/livepatch/Kconfig +++ b/kernel/livepatch/Kconfig @@ -56,5 +56,15 @@ config LIVEPATCH_STACK help Say N here if you want to remove the patch stacking principle. +config LIVEPATCH_RESTRICT_KPROBE + bool "Enforce livepatch and kprobe restriction check" + depends on LIVEPATCH_WO_FTRACE + depends on KPROBES + default y + help + Livepatch without ftrace and kprobe are conflicting. + We should not patch the functions which are registered with kprobes, + and vice versa. + Say Y here if you want to check those. 
endmenu endif diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index b99abdeca4ed..c163658091d4 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -35,6 +35,9 @@ #include "patch.h" #include #include +#ifdef CONFIG_LIVEPATCH_RESTRICT_KPROBE +#include +#endif #ifdef CONFIG_LIVEPATCH_FTRACE #include "transition.h" @@ -64,6 +67,40 @@ struct patch_data { }; #endif +#ifdef CONFIG_LIVEPATCH_RESTRICT_KPROBE +/* + * Check whether a function has been registered with kprobes before being patched. + * We can't patch this function until we unregister the kprobes. + */ +struct kprobe *klp_check_patch_kprobed(struct klp_patch *patch) +{ + struct klp_object *obj; + struct klp_func *func; + struct kprobe *kp; + int i; + + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + for (i = 0; i < func->old_size; i++) { + kp = get_kprobe((void *)func->old_addr + i); + if (kp) { + pr_err("func %s has been probed, (un)patch failed\n", + func->old_name); + return kp; + } + } + } + } + + return NULL; +} +#else +static inline struct kprobe *klp_check_patch_kprobed(struct klp_patch *patch) +{ + return NULL; +} +#endif /* CONFIG_LIVEPATCH_RESTRICT_KPROBE */ + static bool klp_is_module(struct klp_object *obj) { return obj->name; @@ -371,6 +408,12 @@ int klp_try_disable_patch(void *data) if (atomic_inc_return(&pd->cpu_count) == 1) { struct klp_patch *patch = pd->patch; + if (klp_check_patch_kprobed(patch)) { + flag = 1; + atomic_inc(&pd->cpu_count); + return -EINVAL; + } + ret = klp_check_calltrace(patch, 1); if (ret) { flag = 1; @@ -569,6 +612,12 @@ int klp_try_enable_patch(void *data) if (atomic_inc_return(&pd->cpu_count) == 1) { struct klp_patch *patch = pd->patch; + if (klp_check_patch_kprobed(patch)) { + flag = 1; + atomic_inc(&pd->cpu_count); + return -EINVAL; + } + ret = klp_check_calltrace(patch, 1); if (ret) { flag = 1; -- GitLab