diff --git a/kernel/livepatch/Kconfig b/kernel/livepatch/Kconfig
index 9f19752a1be3871464bc13bb76566bd3f51b9ebb..edcd736d2e96abb8f98cd8558de5cbb9c6301038 100644
--- a/kernel/livepatch/Kconfig
+++ b/kernel/livepatch/Kconfig
@@ -56,5 +56,15 @@ config LIVEPATCH_STACK
 	help
 	  Say N here if you want to remove the patch stacking principle.
 
+config LIVEPATCH_RESTRICT_KPROBE
+	bool "Enforce restriction between livepatch and kprobe"
+	depends on LIVEPATCH_WO_FTRACE
+	depends on KPROBES
+	default y
+	help
+	  Livepatch without ftrace and kprobes conflict with each other.
+	  We must not patch a function that has a kprobe registered on it,
+	  and vice versa.
+	  Say Y here to enable this mutual-exclusion check.
 endmenu
 endif
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index b99abdeca4ed096d41631dc4d1b95a8e6cfbe6b0..c163658091d443346cfa4f07e6ed468fdd3d003b 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -35,6 +35,9 @@
 #include "patch.h"
 #include
 #include
+#ifdef CONFIG_LIVEPATCH_RESTRICT_KPROBE
+#include <linux/kprobes.h>
+#endif
 
 #ifdef CONFIG_LIVEPATCH_FTRACE
 #include "transition.h"
@@ -64,6 +67,40 @@ struct patch_data {
 };
 #endif
 
+#ifdef CONFIG_LIVEPATCH_RESTRICT_KPROBE
+/*
+ * Check whether any function in @patch has a kprobe installed in its old
+ * code range; we must not (un)patch until those kprobes are unregistered.
+ */
+struct kprobe *klp_check_patch_kprobed(struct klp_patch *patch)
+{
+	struct klp_object *obj;
+	struct klp_func *func;
+	struct kprobe *kp;
+	unsigned long i;
+
+	klp_for_each_object(patch, obj) {
+		klp_for_each_func(obj, func) {
+			for (i = 0; i < func->old_size; i++) {
+				kp = get_kprobe((void *)func->old_addr + i);
+				if (kp) {
+					pr_err("func %s has been probed, (un)patch failed\n",
+					       func->old_name);
+					return kp;
+				}
+			}
+		}
+	}
+
+	return NULL;
+}
+#else
+static inline struct kprobe *klp_check_patch_kprobed(struct klp_patch *patch)
+{
+	return NULL;
+}
+#endif /* CONFIG_LIVEPATCH_RESTRICT_KPROBE */
+
 static bool klp_is_module(struct klp_object *obj)
 {
 	return obj->name;
@@ -371,6 +408,12 @@ int klp_try_disable_patch(void *data)
 	if (atomic_inc_return(&pd->cpu_count) == 1) {
 		struct klp_patch *patch = pd->patch;
 
+		if (klp_check_patch_kprobed(patch)) {
+			flag = 1;
+			atomic_inc(&pd->cpu_count);
+			return -EINVAL;
+		}
+
 		ret = klp_check_calltrace(patch, 1);
 		if (ret) {
 			flag = 1;
@@ -569,6 +612,12 @@ int klp_try_enable_patch(void *data)
 	if (atomic_inc_return(&pd->cpu_count) == 1) {
 		struct klp_patch *patch = pd->patch;
 
+		if (klp_check_patch_kprobed(patch)) {
+			flag = 1;
+			atomic_inc(&pd->cpu_count);
+			return -EINVAL;
+		}
+
 		ret = klp_check_calltrace(patch, 1);
 		if (ret) {
 			flag = 1;