提交 7e8d223e 编写于 作者: C Cheng Jian 提交者: Xie XiuQi

livepatch/core: fix cache consistency when disable patch

euler inclusion
category: bugfix
bugzilla: 5507
CVE: NA

-------------------------------------------------

The AArch64 CPU uses independent instruction and data caches,
so we must flush the instruction cache whenever a patch is
enabled or disabled.

This flush was missed on the disable-patch path, so fix it.
Signed-off-by: NCheng Jian <cj.chengjian@huawei.com>
Reviewed-by: NLi Bin <huawei.libin@huawei.com>
Signed-off-by: NYang Yingliang <yangyingliang@huawei.com>
上级 08ac533f
...@@ -35,6 +35,7 @@ ...@@ -35,6 +35,7 @@
#include "patch.h" #include "patch.h"
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#ifdef CONFIG_LIVEPATCH_FTRACE #ifdef CONFIG_LIVEPATCH_FTRACE
#include "transition.h" #include "transition.h"
#endif #endif
...@@ -56,6 +57,13 @@ static LIST_HEAD(klp_patches); ...@@ -56,6 +57,13 @@ static LIST_HEAD(klp_patches);
static struct kobject *klp_root_kobj; static struct kobject *klp_root_kobj;
#ifdef CONFIG_LIVEPATCH_WO_FTRACE
/*
 * Per-operation state shared by all CPUs inside the stop_machine()
 * callback (see klp_try_disable_patch(), which casts its void *data
 * argument to this type).
 */
struct patch_data {
struct klp_patch *patch;	/* the patch being enabled/disabled */
atomic_t cpu_count;	/* rendezvous counter: CPUs that have arrived */
};
#endif
static bool klp_is_module(struct klp_object *obj) static bool klp_is_module(struct klp_object *obj)
{ {
return obj->name; return obj->name;
...@@ -356,20 +364,47 @@ static int disable_patch(struct klp_patch *patch) ...@@ -356,20 +364,47 @@ static int disable_patch(struct klp_patch *patch)
int klp_try_disable_patch(void *data) int klp_try_disable_patch(void *data)
{ {
struct klp_patch *patch = data;
int ret = 0; int ret = 0;
int flag = 0;
struct patch_data *pd = (struct patch_data *)data;
ret = klp_check_calltrace(patch, 0); if (atomic_inc_return(&pd->cpu_count) == 1) {
if (ret) struct klp_patch *patch = pd->patch;
return ret;
ret = klp_check_calltrace(patch, 1);
if (ret) {
flag = 1;
atomic_inc(&pd->cpu_count);
return ret;
}
ret = disable_patch(patch);
if (ret) {
flag = 1;
atomic_inc(&pd->cpu_count);
return ret;
}
atomic_inc(&pd->cpu_count);
} else {
while (atomic_read(&pd->cpu_count) <= num_online_cpus())
cpu_relax();
if (!flag)
klp_smp_isb();
}
ret = disable_patch(patch);
return ret; return ret;
} }
static int __klp_disable_patch(struct klp_patch *patch) static int __klp_disable_patch(struct klp_patch *patch)
{ {
int ret; int ret;
struct patch_data patch_data = {
.patch = patch,
.cpu_count = ATOMIC_INIT(0),
};
if (WARN_ON(!patch->enabled))
return -EINVAL;
#ifdef CONFIG_LIVEPATCH_STACK #ifdef CONFIG_LIVEPATCH_STACK
/* enforce stacking: only the last enabled patch can be disabled */ /* enforce stacking: only the last enabled patch can be disabled */
...@@ -380,7 +415,7 @@ static int __klp_disable_patch(struct klp_patch *patch) ...@@ -380,7 +415,7 @@ static int __klp_disable_patch(struct klp_patch *patch)
} }
#endif #endif
ret = stop_machine(klp_try_disable_patch, patch, NULL); ret = stop_machine(klp_try_disable_patch, &patch_data, cpu_online_mask);
return ret; return ret;
} }
...@@ -525,11 +560,6 @@ static int enable_patch(struct klp_patch *patch) ...@@ -525,11 +560,6 @@ static int enable_patch(struct klp_patch *patch)
return ret; return ret;
} }
struct patch_data {
struct klp_patch *patch;
atomic_t cpu_count;
};
int klp_try_enable_patch(void *data) int klp_try_enable_patch(void *data)
{ {
int ret = 0; int ret = 0;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册