Commit 000c0197 authored by Ye Weihua, committed by Zheng Zengkai

livepatch: move memory alloc and free out of stop_machine

hulk inclusion
category: feature
bugzilla: 51924
CVE: NA

---------------------------

When a livepatch module is loaded (insmod), stop_machine() stops the
other cores, which interrupts services, so the shorter the stop_machine()
window, the better. Moving memory allocation and release out of the
stop_machine() context shortens the time for which the machine is stopped.

In particular, module_alloc() and module_memfree() are vmalloc-based and
may sleep when called, so they must not be used in stop_machine() context.
Signed-off-by: Ye Weihua <yeweihua4@huawei.com>
Reviewed-by: Yang Jihong <yangjihong1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 673d37a2
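
In outline, the enable path now allocates the per-function nodes before stop_machine() and frees them only on failure (or when the patch is disabled), both outside the stopped window. A minimal sketch of that flow, condensed from the __klp_enable_patch() hunk at the end of this diff; locking and the other steps are omitted, and the _outline name and the void *patch_data parameter are illustrative only, not real kernel symbols:

#include <linux/livepatch.h>
#include <linux/stop_machine.h>

/* Sketch only: mirrors the order of the calls added by this patch. */
static int klp_enable_patch_outline(struct klp_patch *patch, void *patch_data)
{
	int ret;

	arch_klp_code_modify_prepare();

	/* kzalloc()/module_alloc() may sleep, so allocate before the cores stop. */
	arch_klp_mem_prepare(patch);

	/* Stopped context: arch code only wires up the pre-allocated nodes. */
	ret = stop_machine(klp_try_enable_patch, patch_data, cpu_online_mask);

	arch_klp_code_modify_post_process();

	if (ret)
		/* Failure: release the unused nodes, again outside stop_machine(). */
		arch_klp_mem_recycle(patch);

	return ret;
}
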
@@ -229,7 +229,7 @@ int arch_klp_patch_func(struct klp_func *func)
func_node = klp_find_func_node(func->old_func);
if (!func_node) {
-func_node = kzalloc(sizeof(*func_node), GFP_ATOMIC);
func_node = func->func_node;
if (!func_node)
return -ENOMEM;
@@ -246,7 +246,6 @@ int arch_klp_patch_func(struct klp_func *func)
ret = arm_insn_read(func->old_func, &func_node->old_insn);
#endif
if (ret) {
-kfree(func_node);
return -EPERM;
}
list_add_rcu(&func_node->node, &klp_func_list);
@@ -308,7 +307,6 @@ void arch_klp_unpatch_func(struct klp_func *func)
#endif
list_del_rcu(&func->stack_node);
list_del_rcu(&func_node->node);
-kfree(func_node);
} else {
list_del_rcu(&func->stack_node);
next_func = list_first_or_null_rcu(&func_node->func_stack,
@@ -364,3 +362,33 @@ int arch_klp_func_can_patch(struct klp_func *func)
return 0;
}
#endif /* #ifdef CONFIG_ARM_MODULE_PLTS */
void arch_klp_mem_prepare(struct klp_patch *patch)
{
struct klp_object *obj;
struct klp_func *func;
klp_for_each_object(patch, obj) {
klp_for_each_func(obj, func) {
func->func_node = kzalloc(sizeof(struct klp_func_node),
GFP_ATOMIC);
}
}
}
void arch_klp_mem_recycle(struct klp_patch *patch)
{
struct klp_object *obj;
struct klp_func *func;
struct klp_func_node *func_node;
klp_for_each_object(patch, obj) {
klp_for_each_func(obj, func) {
func_node = func->func_node;
if (func_node && list_is_singular(&func_node->func_stack)) {
kfree(func_node);
func->func_node = NULL;
}
}
}
}
@@ -215,7 +215,7 @@ int arch_klp_patch_func(struct klp_func *func)
func_node = klp_find_func_node((unsigned long)func->old_func);
if (!func_node) {
-func_node = kzalloc(sizeof(*func_node), GFP_ATOMIC);
func_node = func->func_node;
if (!func_node)
return -ENOMEM;
memory_flag = 1;
@@ -235,7 +235,6 @@ int arch_klp_patch_func(struct klp_func *func)
&func_node->old_insn);
#endif
if (ret) {
-kfree(func_node);
return -EPERM;
}
@@ -276,7 +275,6 @@ int arch_klp_patch_func(struct klp_func *func)
list_del_rcu(&func->stack_node);
if (memory_flag) {
list_del_rcu(&func_node->node);
-kfree(func_node);
}
return -EPERM;
@@ -306,7 +304,6 @@ void arch_klp_unpatch_func(struct klp_func *func)
#endif
list_del_rcu(&func->stack_node);
list_del_rcu(&func_node->node);
-kfree(func_node);
#ifdef CONFIG_ARM64_MODULE_PLTS
for (i = 0; i < LJMP_INSN_SIZE; i++) {
@@ -372,3 +369,33 @@ int arch_klp_func_can_patch(struct klp_func *func)
return 0;
}
#endif
void arch_klp_mem_prepare(struct klp_patch *patch)
{
struct klp_object *obj;
struct klp_func *func;
klp_for_each_object(patch, obj) {
klp_for_each_func(obj, func) {
func->func_node = kzalloc(sizeof(struct klp_func_node),
GFP_ATOMIC);
}
}
}
void arch_klp_mem_recycle(struct klp_patch *patch)
{
struct klp_object *obj;
struct klp_func *func;
struct klp_func_node *func_node;
klp_for_each_object(patch, obj) {
klp_for_each_func(obj, func) {
func_node = func->func_node;
if (func_node && list_is_singular(&func_node->func_stack)) {
kfree(func_node);
func->func_node = NULL;
}
}
}
}
@@ -254,7 +254,7 @@ int arch_klp_patch_func(struct klp_func *func)
func_node = klp_find_func_node(func->old_func);
if (!func_node) {
-func_node = kzalloc(sizeof(*func_node), GFP_ATOMIC);
func_node = func->func_node;
if (!func_node)
return -ENOMEM;
@@ -265,7 +265,6 @@ int arch_klp_patch_func(struct klp_func *func)
ret = copy_from_kernel_nofault(&func_node->old_insns[i],
((u32 *)func->old_func) + i, LJMP_INSN_SIZE);
if (ret) {
-kfree(func_node);
return -EPERM;
}
}
@@ -309,7 +308,6 @@ int arch_klp_patch_func(struct klp_func *func)
list_del_rcu(&func->stack_node);
if (memory_flag) {
list_del_rcu(&func_node->node);
-kfree(func_node);
}
return -EPERM;
@@ -331,7 +329,6 @@ void arch_klp_unpatch_func(struct klp_func *func)
list_del_rcu(&func->stack_node);
list_del_rcu(&func_node->node);
-kfree(func_node);
for (i = 0; i < LJMP_INSN_SIZE; i++)
patch_instruction((struct ppc_inst *)(((u32 *)pc) + i),
@@ -383,4 +380,34 @@ int arch_klp_func_can_patch(struct klp_func *func)
}
return 0;
}
void arch_klp_mem_prepare(struct klp_patch *patch)
{
struct klp_object *obj;
struct klp_func *func;
klp_for_each_object(patch, obj) {
klp_for_each_func(obj, func) {
func->func_node = kzalloc(sizeof(struct klp_func_node),
GFP_ATOMIC);
}
}
}
void arch_klp_mem_recycle(struct klp_patch *patch)
{
struct klp_object *obj;
struct klp_func *func;
struct klp_func_node *func_node;
klp_for_each_object(patch, obj) {
klp_for_each_func(obj, func) {
func_node = func->func_node;
if (func_node && list_is_singular(&func_node->func_stack)) {
kfree(func_node);
func->func_node = NULL;
}
}
}
}
#endif
@@ -351,7 +351,7 @@ int arch_klp_patch_func(struct klp_func *func)
func_node = klp_find_func_node(func->old_func);
if (!func_node) {
-func_node = module_alloc(sizeof(*func_node));
func_node = func->func_node;
if (!func_node)
return -ENOMEM;
@@ -362,7 +362,6 @@ int arch_klp_patch_func(struct klp_func *func)
ret = copy_from_kernel_nofault(&func_node->old_insns[i],
((u32 *)func->old_func) + i, 4);
if (ret) {
-module_memfree(func_node);
return -EPERM;
}
}
@@ -393,7 +392,6 @@ int arch_klp_patch_func(struct klp_func *func)
list_del_rcu(&func->stack_node);
if (memory_flag) {
list_del_rcu(&func_node->node);
-module_memfree(func_node);
}
return -EPERM;
@@ -415,7 +413,6 @@ void arch_klp_unpatch_func(struct klp_func *func)
list_del_rcu(&func->stack_node);
list_del_rcu(&func_node->node);
-module_memfree(func_node);
for (i = 0; i < LJMP_INSN_SIZE; i++)
patch_instruction((struct ppc_inst *)((u32 *)pc + i),
@@ -498,4 +495,33 @@ int arch_klp_init_func(struct klp_object *obj, struct klp_func *func)
return 0;
}
void arch_klp_mem_prepare(struct klp_patch *patch)
{
struct klp_object *obj;
struct klp_func *func;
klp_for_each_object(patch, obj) {
klp_for_each_func(obj, func) {
func->func_node = module_alloc(sizeof(struct klp_func_node));
}
}
}
void arch_klp_mem_recycle(struct klp_patch *patch)
{
struct klp_object *obj;
struct klp_func *func;
struct klp_func_node *func_node;
klp_for_each_object(patch, obj) {
klp_for_each_func(obj, func) {
func_node = func->func_node;
if (func_node && list_is_singular(&func_node->func_stack)) {
module_memfree(func_node);
func->func_node = NULL;
}
}
}
}
#endif
@@ -284,7 +284,7 @@ int arch_klp_patch_func(struct klp_func *func)
func_node = klp_find_func_node(func->old_func);
ip = (unsigned long)func->old_func;
if (!func_node) {
-func_node = kzalloc(sizeof(*func_node), GFP_ATOMIC);
func_node = func->func_node;
if (!func_node)
return -ENOMEM;
@@ -293,7 +293,6 @@ int arch_klp_patch_func(struct klp_func *func)
ret = copy_from_kernel_nofault(func_node->old_code,
(void *)ip, JMP_E9_INSN_SIZE);
if (ret) {
-kfree(func_node);
return -EPERM;
}
list_add_rcu(&func_node->node, &klp_func_list);
@@ -322,7 +321,6 @@ void arch_klp_unpatch_func(struct klp_func *func)
list_del_rcu(&func->stack_node);
list_del_rcu(&func_node->node);
new = klp_old_code(func_node->old_code);
-kfree(func_node);
} else {
list_del_rcu(&func->stack_node);
next_func = list_first_or_null_rcu(&func_node->func_stack,
@@ -335,4 +333,34 @@ void arch_klp_unpatch_func(struct klp_func *func)
/* replace the text with the new text */
text_poke((void *)ip, new, JMP_E9_INSN_SIZE);
}
void arch_klp_mem_prepare(struct klp_patch *patch)
{
struct klp_object *obj;
struct klp_func *func;
klp_for_each_object(patch, obj) {
klp_for_each_func(obj, func) {
func->func_node = kzalloc(sizeof(struct klp_func_node),
GFP_ATOMIC);
}
}
}
void arch_klp_mem_recycle(struct klp_patch *patch)
{
struct klp_object *obj;
struct klp_func *func;
struct klp_func_node *func_node;
klp_for_each_object(patch, obj) {
klp_for_each_func(obj, func) {
func_node = func->func_node;
if (func_node && list_is_singular(&func_node->func_stack)) {
kfree(func_node);
func->func_node = NULL;
}
}
}
}
#endif
@@ -86,6 +86,7 @@ struct klp_func {
func_descr_t new_func_descr;
#endif
#endif
void *func_node;
};
struct klp_object;
@@ -1283,6 +1283,13 @@ void __weak arch_klp_code_modify_post_process(void)
{
}
void __weak arch_klp_mem_prepare(struct klp_patch *patch)
{
}
void __weak arch_klp_mem_recycle(struct klp_patch *patch)
{
}
static int __klp_disable_patch(struct klp_patch *patch)
{
@@ -1306,6 +1313,7 @@ static int __klp_disable_patch(struct klp_patch *patch)
arch_klp_code_modify_prepare();
ret = stop_machine(klp_try_disable_patch, &patch_data, cpu_online_mask);
arch_klp_mem_recycle(patch);
arch_klp_code_modify_post_process();
if (ret)
return ret;
@@ -1463,10 +1471,13 @@ static int __klp_enable_patch(struct klp_patch *patch)
#endif
arch_klp_code_modify_prepare();
arch_klp_mem_prepare(patch);
ret = stop_machine(klp_try_enable_patch, &patch_data, cpu_online_mask);
arch_klp_code_modify_post_process();
-if (ret)
if (ret) {
arch_klp_mem_recycle(patch);
return ret;
}
#ifndef CONFIG_LIVEPATCH_STACK
/* move the enabled patch to the list tail */