diff --git a/kernel/livepatch/Kconfig b/kernel/livepatch/Kconfig
index fbbe99edf0bbf31f8f86a260d134c0efa1a1987e..4e6dfea097f3dedb0bd294a585d49f77056c6dec 100644
--- a/kernel/livepatch/Kconfig
+++ b/kernel/livepatch/Kconfig
@@ -57,5 +57,15 @@ config LIVEPATCH_STACK
 	help
 	  Say N here if you want to remove the patch stacking principle.
 
+config LIVEPATCH_RESTRICT_KPROBE
+	bool "Enforce livepatch and kprobe restriction check"
+	depends on LIVEPATCH_WO_FTRACE
+	depends on KPROBES
+	default y
+	help
+	  Livepatch without ftrace and kprobes conflict with each other.
+	  A function that has been registered with a kprobe must not be
+	  patched, and vice versa.
+	  Say Y here to enforce this check.
 endmenu
 
 endif
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index b37df4ec904a15dff2503616f3497f4eaac139c8..f20aba5a8e3523200e3d2df85590f754c16b622d 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -24,6 +24,9 @@
 #include "patch.h"
 #include
 #include
+#ifdef CONFIG_LIVEPATCH_RESTRICT_KPROBE
+#include <linux/kprobes.h>
+#endif
 #ifdef CONFIG_LIVEPATCH_FTRACE
 #include "state.h"
 #include "transition.h"
@@ -58,6 +61,40 @@ struct patch_data {
 };
 #endif
 
+#ifdef CONFIG_LIVEPATCH_RESTRICT_KPROBE
+/*
+ * Check whether any function to be patched has been registered with kprobes.
+ * Such a function cannot be patched until its kprobes are unregistered.
+ */
+struct kprobe *klp_check_patch_kprobed(struct klp_patch *patch)
+{
+	struct klp_object *obj;
+	struct klp_func *func;
+	struct kprobe *kp;
+	int i;
+
+	klp_for_each_object(patch, obj) {
+		klp_for_each_func(obj, func) {
+			for (i = 0; i < func->old_size; i++) {
+				kp = get_kprobe(func->old_func + i);
+				if (kp) {
+					pr_err("func %s has been probed, (un)patch failed\n",
+					       func->old_name);
+					return kp;
+				}
+			}
+		}
+	}
+
+	return NULL;
+}
+#else
+static inline struct kprobe *klp_check_patch_kprobed(struct klp_patch *patch)
+{
+	return NULL;
+}
+#endif /* CONFIG_LIVEPATCH_RESTRICT_KPROBE */
+
 static bool klp_is_module(struct klp_object *obj)
 {
 	return obj->name;
@@ -1107,6 +1144,11 @@ int klp_try_disable_patch(void *data)
 	if (atomic_inc_return(&pd->cpu_count) == 1) {
 		struct klp_patch *patch = pd->patch;
 
+		if (klp_check_patch_kprobed(patch)) {
+			atomic_inc(&pd->cpu_count);
+			return -EINVAL;
+		}
+
 		ret = klp_check_calltrace(patch, 0);
 		if (ret) {
 			atomic_inc(&pd->cpu_count);
@@ -1258,6 +1300,11 @@ int klp_try_enable_patch(void *data)
 	if (atomic_inc_return(&pd->cpu_count) == 1) {
 		struct klp_patch *patch = pd->patch;
 
+		if (klp_check_patch_kprobed(patch)) {
+			atomic_inc(&pd->cpu_count);
+			return -EINVAL;
+		}
+
 		ret = klp_check_calltrace(patch, 1);
 		if (ret) {
 			atomic_inc(&pd->cpu_count);
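
For reviewers, a minimal out-of-tree sketch (not part of this patch) of the conflict the new check rejects: once a module such as the hypothetical one below has planted a kprobe anywhere inside a function, klp_check_patch_kprobed() finds it via get_kprobe() on each byte of the old function and the (un)patch attempt bails out with -EINVAL until the probe is unregistered. The probed symbol meminfo_proc_show and the module/handler names are illustrative assumptions only.

/*
 * Hypothetical module illustrating the situation rejected by
 * CONFIG_LIVEPATCH_RESTRICT_KPROBE: while this kprobe is armed on
 * meminfo_proc_show() (example target only), a livepatch whose
 * klp_func covers the same function will fail in
 * klp_check_patch_kprobed() until the probe is unregistered.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

static int demo_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	/* Report the probed address each time the probe fires. */
	pr_info("kprobe hit at %pS\n", p->addr);
	return 0;
}

static struct kprobe demo_kp = {
	.symbol_name	= "meminfo_proc_show",	/* hypothetical example target */
	.pre_handler	= demo_pre_handler,
};

static int __init demo_init(void)
{
	int ret = register_kprobe(&demo_kp);

	if (ret < 0)
		pr_err("register_kprobe failed: %d\n", ret);
	return ret;
}

static void __exit demo_exit(void)
{
	/* Removing the probe lifts the restriction on (un)patching. */
	unregister_kprobe(&demo_kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note that the check walks every byte offset of old_func rather than only its entry address, so a probe planted in the middle of the function, as a kprobe may be, is caught as well.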