From 000c0197ed376acfec8f0143a6b42c1ead96f9c7 Mon Sep 17 00:00:00 2001
From: Ye Weihua
Date: Mon, 31 May 2021 19:24:09 +0800
Subject: [PATCH] livepatch: move memory alloc and free out of stop_machine

hulk inclusion
category: feature
bugzilla: 51924
CVE: NA

---------------------------

When a livepatch module is loaded (insmod), stop_machine halts the other
cores, which interrupts services, so the shorter the stop_machine window
the better. Moving memory allocation and release out of the stop_machine
callback shortens that window. In particular, module_alloc and
module_memfree are vmalloc-based and may sleep, so they must not be
called in stop_machine (atomic) context.

Signed-off-by: Ye Weihua
Reviewed-by: Yang Jihong
Signed-off-by: Zheng Zengkai
---
 arch/arm/kernel/livepatch.c        | 34 ++++++++++++++++++++++++++---
 arch/arm64/kernel/livepatch.c      | 35 ++++++++++++++++++++++++++----
 arch/powerpc/kernel/livepatch_32.c | 35 ++++++++++++++++++++++++++----
 arch/powerpc/kernel/livepatch_64.c | 34 +++++++++++++++++++++++++----
 arch/x86/kernel/livepatch.c        | 34 ++++++++++++++++++++++++++---
 include/linux/livepatch.h          |  1 +
 kernel/livepatch/core.c            | 13 ++++++++++-
 7 files changed, 167 insertions(+), 19 deletions(-)

diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c
index 6ec30741e9b3..b5fcaf3c4ca7 100644
--- a/arch/arm/kernel/livepatch.c
+++ b/arch/arm/kernel/livepatch.c
@@ -229,7 +229,7 @@ int arch_klp_patch_func(struct klp_func *func)
 
 	func_node = klp_find_func_node(func->old_func);
 	if (!func_node) {
-		func_node = kzalloc(sizeof(*func_node), GFP_ATOMIC);
+		func_node = func->func_node;
 		if (!func_node)
 			return -ENOMEM;
 
@@ -246,7 +246,6 @@ int arch_klp_patch_func(struct klp_func *func)
 	ret = arm_insn_read(func->old_func, &func_node->old_insn);
 #endif
 	if (ret) {
-		kfree(func_node);
 		return -EPERM;
 	}
 	list_add_rcu(&func_node->node, &klp_func_list);
@@ -308,7 +307,6 @@ void arch_klp_unpatch_func(struct klp_func *func)
 #endif
 		list_del_rcu(&func->stack_node);
 		list_del_rcu(&func_node->node);
-		kfree(func_node);
 	} else {
 		list_del_rcu(&func->stack_node);
 		next_func = list_first_or_null_rcu(&func_node->func_stack,
@@ -364,3 +362,33 @@ int arch_klp_func_can_patch(struct klp_func *func)
 	return 0;
 }
 #endif /* #ifdef CONFIG_ARM_MODULE_PLTS */
+
+void arch_klp_mem_prepare(struct klp_patch *patch)
+{
+	struct klp_object *obj;
+	struct klp_func *func;
+
+	klp_for_each_object(patch, obj) {
+		klp_for_each_func(obj, func) {
+			func->func_node = kzalloc(sizeof(struct klp_func_node),
+						  GFP_ATOMIC);
+		}
+	}
+}
+
+void arch_klp_mem_recycle(struct klp_patch *patch)
+{
+	struct klp_object *obj;
+	struct klp_func *func;
+	struct klp_func_node *func_node;
+
+	klp_for_each_object(patch, obj) {
+		klp_for_each_func(obj, func) {
+			func_node = func->func_node;
+			if (func_node && list_is_singular(&func_node->func_stack)) {
+				kfree(func_node);
+				func->func_node = NULL;
+			}
+		}
+	}
+}
diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c
index e1681b94d2fd..a1cd8ee026d7 100644
--- a/arch/arm64/kernel/livepatch.c
+++ b/arch/arm64/kernel/livepatch.c
@@ -215,7 +215,7 @@ int arch_klp_patch_func(struct klp_func *func)
 
 	func_node = klp_find_func_node((unsigned long)func->old_func);
 	if (!func_node) {
-		func_node = kzalloc(sizeof(*func_node), GFP_ATOMIC);
+		func_node = func->func_node;
 		if (!func_node)
 			return -ENOMEM;
 		memory_flag = 1;
@@ -235,7 +235,6 @@ int arch_klp_patch_func(struct klp_func *func)
 					&func_node->old_insn);
 #endif
 	if (ret) {
-		kfree(func_node);
 		return -EPERM;
 	}
 
@@ -276,7 +275,6 @@ int arch_klp_patch_func(struct klp_func *func)
 		list_del_rcu(&func->stack_node);
 		if (memory_flag) {
 			list_del_rcu(&func_node->node);
-			kfree(func_node);
 		}
 
 		return -EPERM;
@@ -306,7 +304,6 @@ void arch_klp_unpatch_func(struct klp_func *func)
 #endif
 		list_del_rcu(&func->stack_node);
 		list_del_rcu(&func_node->node);
-		kfree(func_node);
 
 #ifdef CONFIG_ARM64_MODULE_PLTS
 		for (i = 0; i < LJMP_INSN_SIZE; i++) {
@@ -372,3 +369,33 @@ int arch_klp_func_can_patch(struct klp_func *func)
 	return 0;
 }
 #endif
+
+void arch_klp_mem_prepare(struct klp_patch *patch)
+{
+	struct klp_object *obj;
+	struct klp_func *func;
+
+	klp_for_each_object(patch, obj) {
+		klp_for_each_func(obj, func) {
+			func->func_node = kzalloc(sizeof(struct klp_func_node),
+						  GFP_ATOMIC);
+		}
+	}
+}
+
+void arch_klp_mem_recycle(struct klp_patch *patch)
+{
+	struct klp_object *obj;
+	struct klp_func *func;
+	struct klp_func_node *func_node;
+
+	klp_for_each_object(patch, obj) {
+		klp_for_each_func(obj, func) {
+			func_node = func->func_node;
+			if (func_node && list_is_singular(&func_node->func_stack)) {
+				kfree(func_node);
+				func->func_node = NULL;
+			}
+		}
+	}
+}
diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c
index df09451c934a..35d1885796d4 100644
--- a/arch/powerpc/kernel/livepatch_32.c
+++ b/arch/powerpc/kernel/livepatch_32.c
@@ -254,7 +254,7 @@ int arch_klp_patch_func(struct klp_func *func)
 
 	func_node = klp_find_func_node(func->old_func);
 	if (!func_node) {
-		func_node = kzalloc(sizeof(*func_node), GFP_ATOMIC);
+		func_node = func->func_node;
 		if (!func_node)
 			return -ENOMEM;
 
@@ -265,7 +265,6 @@ int arch_klp_patch_func(struct klp_func *func)
 		ret = copy_from_kernel_nofault(&func_node->old_insns[i],
 			((u32 *)func->old_func) + i, LJMP_INSN_SIZE);
 		if (ret) {
-			kfree(func_node);
 			return -EPERM;
 		}
 	}
@@ -309,7 +308,6 @@ int arch_klp_patch_func(struct klp_func *func)
 		list_del_rcu(&func->stack_node);
 		if (memory_flag) {
 			list_del_rcu(&func_node->node);
-			kfree(func_node);
 		}
 
 		return -EPERM;
@@ -331,7 +329,6 @@ void arch_klp_unpatch_func(struct klp_func *func)
 
 		list_del_rcu(&func->stack_node);
 		list_del_rcu(&func_node->node);
-		kfree(func_node);
 
 		for (i = 0; i < LJMP_INSN_SIZE; i++)
 			patch_instruction((struct ppc_inst *)(((u32 *)pc) + i),
@@ -383,4 +380,34 @@ int arch_klp_func_can_patch(struct klp_func *func)
 	}
 	return 0;
 }
+
+void arch_klp_mem_prepare(struct klp_patch *patch)
+{
+	struct klp_object *obj;
+	struct klp_func *func;
+
+	klp_for_each_object(patch, obj) {
+		klp_for_each_func(obj, func) {
+			func->func_node = kzalloc(sizeof(struct klp_func_node),
+						  GFP_ATOMIC);
+		}
+	}
+}
+
+void arch_klp_mem_recycle(struct klp_patch *patch)
+{
+	struct klp_object *obj;
+	struct klp_func *func;
+	struct klp_func_node *func_node;
+
+	klp_for_each_object(patch, obj) {
+		klp_for_each_func(obj, func) {
+			func_node = func->func_node;
+			if (func_node && list_is_singular(&func_node->func_stack)) {
+				kfree(func_node);
+				func->func_node = NULL;
+			}
+		}
+	}
+}
 #endif
diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c
index d4fb7bc8eee8..6285635e63fd 100644
--- a/arch/powerpc/kernel/livepatch_64.c
+++ b/arch/powerpc/kernel/livepatch_64.c
@@ -351,7 +351,7 @@ int arch_klp_patch_func(struct klp_func *func)
 
 	func_node = klp_find_func_node(func->old_func);
 	if (!func_node) {
-		func_node = module_alloc(sizeof(*func_node));
+		func_node = func->func_node;
 		if (!func_node)
 			return -ENOMEM;
 
@@ -362,7 +362,6 @@ int arch_klp_patch_func(struct klp_func *func)
 		ret = copy_from_kernel_nofault(&func_node->old_insns[i],
 			((u32 *)func->old_func) + i, 4);
 		if (ret) {
-			module_memfree(func_node);
 			return -EPERM;
 		}
 	}
@@ -393,7 +392,6 @@ int arch_klp_patch_func(struct klp_func *func)
 		list_del_rcu(&func->stack_node);
 		if (memory_flag) {
 			list_del_rcu(&func_node->node);
-			module_memfree(func_node);
 		}
 
 		return -EPERM;
@@ -415,7 +413,6 @@ void arch_klp_unpatch_func(struct klp_func *func)
 
 		list_del_rcu(&func->stack_node);
 		list_del_rcu(&func_node->node);
-		module_memfree(func_node);
 
 		for (i = 0; i < LJMP_INSN_SIZE; i++)
 			patch_instruction((struct ppc_inst *)((u32 *)pc + i),
@@ -498,4 +495,33 @@ int arch_klp_init_func(struct klp_object *obj, struct klp_func *func)
 
 	return 0;
 }
+
+void arch_klp_mem_prepare(struct klp_patch *patch)
+{
+	struct klp_object *obj;
+	struct klp_func *func;
+
+	klp_for_each_object(patch, obj) {
+		klp_for_each_func(obj, func) {
+			func->func_node = module_alloc(sizeof(struct klp_func_node));
+		}
+	}
+}
+
+void arch_klp_mem_recycle(struct klp_patch *patch)
+{
+	struct klp_object *obj;
+	struct klp_func *func;
+	struct klp_func_node *func_node;
+
+	klp_for_each_object(patch, obj) {
+		klp_for_each_func(obj, func) {
+			func_node = func->func_node;
+			if (func_node && list_is_singular(&func_node->func_stack)) {
+				module_memfree(func_node);
+				func->func_node = NULL;
+			}
+		}
+	}
+}
 #endif
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
index 5bcb726a1e76..bcfda2490916 100644
--- a/arch/x86/kernel/livepatch.c
+++ b/arch/x86/kernel/livepatch.c
@@ -284,7 +284,7 @@ int arch_klp_patch_func(struct klp_func *func)
 	func_node = klp_find_func_node(func->old_func);
 	ip = (unsigned long)func->old_func;
 	if (!func_node) {
-		func_node = kzalloc(sizeof(*func_node), GFP_ATOMIC);
+		func_node = func->func_node;
 		if (!func_node)
 			return -ENOMEM;
 
@@ -293,7 +293,6 @@ int arch_klp_patch_func(struct klp_func *func)
 		ret = copy_from_kernel_nofault(func_node->old_code,
 					(void *)ip, JMP_E9_INSN_SIZE);
 		if (ret) {
-			kfree(func_node);
 			return -EPERM;
 		}
 		list_add_rcu(&func_node->node, &klp_func_list);
@@ -322,7 +321,6 @@ void arch_klp_unpatch_func(struct klp_func *func)
 		list_del_rcu(&func->stack_node);
 		list_del_rcu(&func_node->node);
 		new = klp_old_code(func_node->old_code);
-		kfree(func_node);
 	} else {
 		list_del_rcu(&func->stack_node);
 		next_func = list_first_or_null_rcu(&func_node->func_stack,
@@ -335,4 +333,34 @@ void arch_klp_unpatch_func(struct klp_func *func)
 	/* replace the text with the new text */
 	text_poke((void *)ip, new, JMP_E9_INSN_SIZE);
 }
+
+void arch_klp_mem_prepare(struct klp_patch *patch)
+{
+	struct klp_object *obj;
+	struct klp_func *func;
+
+	klp_for_each_object(patch, obj) {
+		klp_for_each_func(obj, func) {
+			func->func_node = kzalloc(sizeof(struct klp_func_node),
+						  GFP_ATOMIC);
+		}
+	}
+}
+
+void arch_klp_mem_recycle(struct klp_patch *patch)
+{
+	struct klp_object *obj;
+	struct klp_func *func;
+	struct klp_func_node *func_node;
+
+	klp_for_each_object(patch, obj) {
+		klp_for_each_func(obj, func) {
+			func_node = func->func_node;
+			if (func_node && list_is_singular(&func_node->func_stack)) {
+				kfree(func_node);
+				func->func_node = NULL;
+			}
+		}
+	}
+}
 #endif
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index c602ef123f77..8d228381ee28 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -86,6 +86,7 @@ struct klp_func {
 	func_descr_t new_func_descr;
 #endif
 #endif
+	void *func_node;
 };
 
 struct klp_object;
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 6736c1e8af01..272f8730e176 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -1283,6 +1283,13 @@ void __weak arch_klp_code_modify_post_process(void)
 {
 }
 
+void __weak arch_klp_mem_prepare(struct klp_patch *patch)
+{
+}
+
+void __weak arch_klp_mem_recycle(struct klp_patch *patch)
+{
+}
 
 static int __klp_disable_patch(struct klp_patch *patch)
 {
@@ -1306,6 +1313,7 @@ static int __klp_disable_patch(struct klp_patch *patch)
 	arch_klp_code_modify_prepare();
 	ret = stop_machine(klp_try_disable_patch, &patch_data,
 			   cpu_online_mask);
+	arch_klp_mem_recycle(patch);
 	arch_klp_code_modify_post_process();
 	if (ret)
 		return ret;
@@ -1463,10 +1471,13 @@ static int __klp_enable_patch(struct klp_patch *patch)
 #endif
 
 	arch_klp_code_modify_prepare();
+	arch_klp_mem_prepare(patch);
 	ret = stop_machine(klp_try_enable_patch, &patch_data, cpu_online_mask);
 	arch_klp_code_modify_post_process();
-	if (ret)
+	if (ret) {
+		arch_klp_mem_recycle(patch);
 		return ret;
+	}
 
 #ifndef CONFIG_LIVEPATCH_STACK
 	/* move the enabled patch to the list tail */
-- 
GitLab
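For illustration only, a minimal standalone C sketch of the ordering that the
kernel/livepatch/core.c hunk above establishes. The names below are hypothetical
stubs, not the real kernel APIs: they model only the idea that the sleeping
allocation and free happen outside the stop_machine window, and that the
pre-allocated nodes are recycled when the atomic patching step fails or when the
patch is disabled.

#include <stdio.h>

/* Hypothetical stubs standing in for arch_klp_mem_prepare()/arch_klp_mem_recycle()
 * and the stop_machine() callback; simplified for illustration. */
static void mem_prepare(void)  { printf("allocate func_node memory (may sleep)\n"); }
static void mem_recycle(void)  { printf("free func_node memory (may sleep)\n"); }

static int stop_machine_window(void)
{
	/* Atomic context: only text patching happens here, no alloc/free. */
	printf("stop_machine: apply code patches\n");
	return 0;	/* change to a nonzero value to exercise the rollback path */
}

static int enable_patch_sketch(void)
{
	int ret;

	mem_prepare();			/* before stopping the machine */
	ret = stop_machine_window();	/* kept as short as possible */
	if (ret) {
		mem_recycle();		/* roll back the pre-allocated nodes */
		return ret;
	}
	return 0;			/* nodes stay allocated while the patch is enabled */
}

int main(void)
{
	return enable_patch_sketch();
}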