Commit 283d8d87 authored by Wang Wensheng, committed by Zheng Zengkai

mm/sharepool: Clean up the initialization of sp-associated structures for a process

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5DS9S
CVE: NA

-------------------------------------------------

A few structures must already have been created by the time a process
enters the sharepool subsystem, whether it is allocating sharepool
memory, being added to an spg, doing k2u, and so on.

Currently we create those structures just before we actually need
them. For example, we find or create a sp_spa_stat after a successful
memory allocation and before updating the statistical structure. The
creation of a new structure may fail due to OOM, in which case we have
to reclaim the memory already allocated and revert everything done so
far; if we forget to do that, we get a potential memory leak. This
design makes it hard to tell when a structure is actually created, and
we constantly have to worry about memory leaks whenever we change the
surrounding code.

A better solution is to initialize all of those structures at once,
when a process joins the sharepool subsystem. The statistical
structures that are no longer necessary will be removed in a later
patch.
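After this change, the whole per-process setup is concentrated in a
single path. Condensed from the new code in the diff below (only the
comments are added here for orientation):

static int sp_init_group_master_locked(struct task_struct *tsk, struct mm_struct *mm)
{
    int ret;
    struct sp_group_master *master;

    if (mm->sp_group_master)    /* already initialized, nothing to do */
        return 0;

    master = kmalloc(sizeof(struct sp_group_master), GFP_KERNEL);
    if (!master)
        return -ENOMEM;

    INIT_LIST_HEAD(&master->node_list);
    master->count = 0;
    master->mm = mm;
    mm->sp_group_master = master;

    ret = sp_init_proc_stat(mm, tsk);   /* per-process statistics */
    if (ret)
        goto free_master;

    ret = init_local_group(mm);         /* local spg + DVPP/normal mappings */
    if (ret)
        goto put_stat;

    return 0;

put_stat:
    sp_proc_stat_drop(master->stat);
free_master:
    mm->sp_group_master = NULL;
    kfree(master);
    return ret;
}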
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 447951e3
@@ -507,10 +507,6 @@ static inline struct sp_proc_stat *sp_get_proc_stat_ref(struct mm_struct *mm)
     return NULL;
 }
 
-static inline void sp_proc_stat_drop(struct sp_proc_stat *stat)
-{
-}
-
 static inline void spa_overview_show(struct seq_file *seq)
 {
 }
@@ -249,33 +249,22 @@ static int sp_mapping_group_setup(struct mm_struct *mm, struct sp_group *spg)
     return 0;
 }
 
-static void free_sp_group_locked(struct sp_group *spg);
-static int local_group_add_task(struct mm_struct *mm, struct sp_group *spg);
 static struct sp_group *create_spg(int spg_id);
 static void free_new_spg_id(bool new, int spg_id);
+static void free_sp_group_locked(struct sp_group *spg);
+static int local_group_add_task(struct mm_struct *mm, struct sp_group *spg);
 
-/* The caller must hold sp_group_sem */
-static struct sp_group_master *sp_init_group_master_locked(
-        struct mm_struct *mm, bool *exist)
+static int init_local_group(struct mm_struct *mm)
 {
     int spg_id, ret;
     struct sp_group *spg;
+    struct sp_mapping *spm;
     struct sp_group_master *master = mm->sp_group_master;
 
-    if (master) {
-        *exist = true;
-        return master;
-    }
-
-    master = kmalloc(sizeof(struct sp_group_master), GFP_KERNEL);
-    if (master == NULL)
-        return ERR_PTR(-ENOMEM);
-
     spg_id = ida_alloc_range(&sp_group_id_ida, SPG_ID_LOCAL_MIN,
                  SPG_ID_LOCAL_MAX, GFP_ATOMIC);
     if (spg_id < 0) {
         pr_err_ratelimited("generate local group id failed %d\n", spg_id);
-        ret = spg_id;
-        goto free_master;
+        return spg_id;
     }
 
     spg = create_spg(spg_id);
@@ -284,60 +273,73 @@ static struct sp_group_master *sp_init_group_master_locked(
         goto free_spg_id;
     }
 
-    INIT_LIST_HEAD(&master->node_list);
-    master->count = 0;
-    master->stat = NULL;
-    master->mm = mm;
     master->local = spg;
-    mm->sp_group_master = master;
+    spm = sp_mapping_create(SP_MAPPING_DVPP);
+    if (IS_ERR(spm)) {
+        ret = PTR_ERR(spm);
+        goto free_spg;
+    }
+    sp_mapping_attach(master->local, spm);
+    sp_mapping_attach(master->local, sp_mapping_normal);
 
     ret = local_group_add_task(mm, spg);
     if (ret < 0)
+        /* The spm would be released while destroying the spg*/
         goto free_spg;
 
-    *exist = false;
-    return master;
+    return 0;
 
 free_spg:
     free_sp_group_locked(spg);
+    master->local = NULL;
 free_spg_id:
     free_new_spg_id(true, spg_id);
-free_master:
-    kfree(master);
 
-    return ERR_PTR(ret);
+    return ret;
 }
 
-static inline bool is_local_group(int spg_id)
-{
-    return spg_id >= SPG_ID_LOCAL_MIN && spg_id <= SPG_ID_LOCAL_MAX;
-}
-
-/*
- * If the process is added to a group first, the address space of the local
- * group of the process must have been set. If the process is not added to
- * a group, directly create or attach the process to the corresponding DVPP
- * and normal address space.
- */
-static int sp_mapping_group_setup_local(struct mm_struct *mm)
+static void sp_proc_stat_drop(struct sp_proc_stat *stat);
+static int sp_init_proc_stat(struct mm_struct *mm, struct task_struct *tsk);
+
+/* The caller must hold sp_group_sem */
+static int sp_init_group_master_locked(struct task_struct *tsk, struct mm_struct *mm)
 {
+    int ret;
     struct sp_group_master *master;
-    struct sp_mapping *spm;
-    bool exist = false;
 
-    master = sp_init_group_master_locked(mm, &exist);
-    if (IS_ERR(master))
-        return PTR_ERR(master);
-
-    if (master->local->dvpp)
+    if (mm->sp_group_master)
         return 0;
 
-    spm = sp_mapping_create(SP_MAPPING_DVPP);
-    if (IS_ERR(spm))
-        return PTR_ERR(spm);
-    sp_mapping_attach(master->local, spm);
-    sp_mapping_attach(master->local, sp_mapping_normal);
+    master = kmalloc(sizeof(struct sp_group_master), GFP_KERNEL);
+    if (!master)
+        return -ENOMEM;
+
+    INIT_LIST_HEAD(&master->node_list);
+    master->count = 0;
+    master->mm = mm;
+    mm->sp_group_master = master;
+
+    ret = sp_init_proc_stat(mm, tsk);
+    if (ret)
+        goto free_master;
+
+    ret = init_local_group(mm);
+    if (ret)
+        goto put_stat;
 
     return 0;
+
+put_stat:
+    sp_proc_stat_drop(master->stat);
+free_master:
+    mm->sp_group_master = NULL;
+    kfree(master);
+
+    return ret;
+}
+
+static inline bool is_local_group(int spg_id)
+{
+    return spg_id >= SPG_ID_LOCAL_MIN && spg_id <= SPG_ID_LOCAL_MAX;
 }
@@ -355,7 +357,7 @@ static struct sp_group *sp_get_local_group(struct mm_struct *mm)
     up_read(&sp_group_sem);
 
     down_write(&sp_group_sem);
-    ret = sp_mapping_group_setup_local(mm);
+    ret = sp_init_group_master_locked(current, mm);
     if (ret) {
         up_write(&sp_group_sem);
         return ERR_PTR(ret);
@@ -403,37 +405,29 @@ static struct sp_proc_stat *create_proc_stat(struct mm_struct *mm,
     return stat;
 }
 
-static struct sp_proc_stat *sp_init_proc_stat(struct sp_group_master *master,
-        struct mm_struct *mm, struct task_struct *tsk)
+static int sp_init_proc_stat(struct mm_struct *mm, struct task_struct *tsk)
 {
     struct sp_proc_stat *stat;
     int alloc_id, tgid = tsk->tgid;
+    struct sp_group_master *master = mm->sp_group_master;
 
-    down_write(&sp_proc_stat_sem);
-    stat = master->stat;
-    if (stat) {
-        up_write(&sp_proc_stat_sem);
-        return stat;
-    }
-
     stat = create_proc_stat(mm, tsk);
-    if (IS_ERR(stat)) {
-        up_write(&sp_proc_stat_sem);
-        return stat;
-    }
+    if (IS_ERR(stat))
+        return PTR_ERR(stat);
 
+    down_write(&sp_proc_stat_sem);
     alloc_id = idr_alloc(&sp_proc_stat_idr, stat, tgid, tgid + 1, GFP_KERNEL);
     if (alloc_id < 0) {
         up_write(&sp_proc_stat_sem);
         pr_err_ratelimited("proc stat idr alloc failed %d\n", alloc_id);
         kfree(stat);
-        return ERR_PTR(alloc_id);
+        return alloc_id;
     }
 
     master->stat = stat;
     up_write(&sp_proc_stat_sem);
 
-    return stat;
+    return 0;
 }
 
 static void update_spg_stat_alloc(unsigned long size, bool inc,
@@ -547,18 +541,14 @@ static struct spg_proc_stat *create_spg_proc_stat(int tgid, int spg_id)
     return stat;
 }
 
-static struct spg_proc_stat *sp_init_spg_proc_stat(
-        struct sp_proc_stat *proc_stat, int tgid, struct sp_group *spg)
+static struct spg_proc_stat *sp_init_spg_proc_stat(struct sp_proc_stat *proc_stat,
+        struct sp_group *spg)
 {
     struct spg_proc_stat *stat;
     int spg_id = spg->id; /* visit spg id locklessly */
     struct sp_spg_stat *spg_stat = spg->stat;
 
-    stat = find_spg_proc_stat(proc_stat, tgid, spg_id);
-    if (stat)
-        return stat;
-
-    stat = create_spg_proc_stat(tgid, spg_id);
+    stat = create_spg_proc_stat(proc_stat->tgid, spg_id);
     if (IS_ERR(stat))
         return stat;
@@ -575,31 +565,6 @@ static struct spg_proc_stat *sp_init_spg_proc_stat(
     return stat;
 }
 
-/*
- * The caller must
- * 1. ensure no concurrency problem for task_struct and mm_struct.
- * 2. hold sp_group_sem for sp_group_master (pay attention to ABBA deadlock)
- */
-static struct spg_proc_stat *sp_init_process_stat(struct task_struct *tsk,
-        struct mm_struct *mm, struct sp_group *spg)
-{
-    struct sp_group_master *master;
-    bool exist;
-    struct sp_proc_stat *proc_stat;
-    struct spg_proc_stat *spg_proc_stat;
-
-    master = sp_init_group_master_locked(mm, &exist);
-    if (IS_ERR(master))
-        return (struct spg_proc_stat *)master;
-
-    proc_stat = sp_init_proc_stat(master, mm, tsk);
-    if (IS_ERR(proc_stat))
-        return (struct spg_proc_stat *)proc_stat;
-
-    spg_proc_stat = sp_init_spg_proc_stat(proc_stat, tsk->tgid, spg);
-    return spg_proc_stat;
-}
-
 static struct sp_spg_stat *create_spg_stat(int spg_id)
 {
     struct sp_spg_stat *stat;
@@ -846,9 +811,9 @@ static void sp_update_process_stat(struct task_struct *tsk, bool inc,
     enum spa_type type = spa->type;
 
     down_write(&sp_group_sem);
-    stat = sp_init_process_stat(tsk, tsk->mm, spa->spg);
+    stat = find_spg_proc_stat(tsk->mm->sp_group_master->stat, tsk->tgid, spa->spg->id);
     up_write(&sp_group_sem);
-    if (unlikely(IS_ERR(stat)))
+    if (!stat)
         return;
 
     update_spg_proc_stat(size, inc, stat, type);
@@ -1253,26 +1218,27 @@ static void sp_munmap_task_areas(struct mm_struct *mm, struct sp_group *spg, str
 }
 
 /* the caller must hold sp_group_sem */
-static int mm_add_group_init(struct mm_struct *mm, struct sp_group *spg)
+static int mm_add_group_init(struct task_struct *tsk, struct mm_struct *mm,
+        struct sp_group *spg)
 {
-    struct sp_group_master *master = mm->sp_group_master;
-    bool exist = false;
-
-    master = sp_init_group_master_locked(mm, &exist);
-    if (IS_ERR(master))
-        return PTR_ERR(master);
-
-    if (!exist)
-        return 0;
+    int ret;
+    struct sp_group_master *master;
 
-    if (is_process_in_group(spg, mm)) {
-        pr_err_ratelimited("task already in target group, id=%d\n", spg->id);
-        return -EEXIST;
-    }
+    if (!mm->sp_group_master) {
+        ret = sp_init_group_master_locked(tsk, mm);
+        if (ret)
+            return ret;
+    } else {
+        if (is_process_in_group(spg, mm)) {
+            pr_err_ratelimited("task already in target group, id=%d\n", spg->id);
+            return -EEXIST;
+        }
 
-    if (master->count == MAX_GROUP_FOR_TASK) {
-        pr_err("task reaches max group num\n");
-        return -ENOSPC;
+        master = mm->sp_group_master;
+        if (master->count == MAX_GROUP_FOR_TASK) {
+            pr_err("task reaches max group num\n");
+            return -ENOSPC;
+        }
     }
 
     return 0;
@@ -1311,29 +1277,13 @@ static int insert_spg_node(struct sp_group *spg, struct sp_group_node *node)
     spg->proc_num++;
     list_add_tail(&node->proc_node, &spg->procs);
 
-    return 0;
-}
-
-static int local_group_add_task(struct mm_struct *mm, struct sp_group *spg)
-{
-    struct sp_group_node *node;
-    struct spg_proc_stat *stat;
-
-    node = create_spg_node(mm, PROT_READ | PROT_WRITE, spg);
-    if (IS_ERR(node))
-        return PTR_ERR(node);
-
-    /* use current just to avoid compile error, rebuild in following patch */
-    stat = sp_init_process_stat(current, mm, spg);
-    if (IS_ERR(stat)) {
-        free_sp_group_locked(spg);
-        pr_err_ratelimited("init process stat failed %lx\n", PTR_ERR(stat));
-        return PTR_ERR(stat);
-    }
-
-    insert_spg_node(spg, node);
-    mmget(mm);
+    /*
+     * The only way where sp_init_spg_proc_stat got failed is that there is no
+     * memory for sp_spg_stat. We will avoid this failure when we put sp_spg_stat
+     * into sp_group_node later.
+     */
+    sp_init_spg_proc_stat(node->master->stat, spg);
 
     return 0;
 }
@@ -1356,6 +1306,20 @@ static void free_spg_node(struct mm_struct *mm, struct sp_group *spg,
     kfree(spg_node);
 }
 
+static int local_group_add_task(struct mm_struct *mm, struct sp_group *spg)
+{
+    struct sp_group_node *node;
+
+    node = create_spg_node(mm, PROT_READ | PROT_WRITE, spg);
+    if (IS_ERR(node))
+        return PTR_ERR(node);
+
+    insert_spg_node(spg, node);
+    mmget(mm);
+
+    return 0;
+}
+
 /**
  * sp_group_add_task() - Add a process to an share group (sp_group).
  * @pid: the pid of the task to be added.
@@ -1380,7 +1344,6 @@ int mg_sp_group_add_task(int pid, unsigned long prot, int spg_id)
     int ret = 0;
     bool id_newly_generated = false;
     struct sp_area *spa, *prev = NULL;
-    struct spg_proc_stat *stat;
 
     check_interrupt_context();
@@ -1483,29 +1446,26 @@ int mg_sp_group_add_task(int pid, unsigned long prot, int spg_id)
         }
     }
 
-    ret = mm_add_group_init(mm, spg);
-    if (ret)
+    down_write(&spg->rw_lock);
+    ret = mm_add_group_init(tsk, mm, spg);
+    if (ret) {
+        up_write(&spg->rw_lock);
         goto out_drop_group;
+    }
 
     ret = sp_mapping_group_setup(mm, spg);
-    if (ret)
+    if (ret) {
+        up_write(&spg->rw_lock);
         goto out_drop_group;
+    }
 
     node = create_spg_node(mm, prot, spg);
     if (unlikely(IS_ERR(node))) {
+        up_write(&spg->rw_lock);
         ret = PTR_ERR(node);
         goto out_drop_group;
     }
 
-    /* per process statistics initialization */
-    stat = sp_init_process_stat(tsk, mm, spg);
-    if (IS_ERR(stat)) {
-        ret = PTR_ERR(stat);
-        pr_err_ratelimited("init process stat failed %lx\n", PTR_ERR(stat));
-        goto out_drop_spg_node;
-    }
-
-    down_write(&spg->rw_lock);
     ret = insert_spg_node(spg, node);
     if (unlikely(ret)) {
         up_write(&spg->rw_lock);
@@ -1757,7 +1717,7 @@ int sp_id_of_current(void)
 
     down_read(&sp_group_sem);
     master = current->mm->sp_group_master;
-    if (master && master->local) {
+    if (master) {
         spg_id = master->local->id;
         up_read(&sp_group_sem);
         return spg_id;
@@ -1765,7 +1725,7 @@ int sp_id_of_current(void)
     up_read(&sp_group_sem);
 
     down_write(&sp_group_sem);
-    ret = sp_mapping_group_setup_local(current->mm);
+    ret = sp_init_group_master_locked(current, current->mm);
     if (ret) {
         up_write(&sp_group_sem);
         return ret;
@@ -2924,7 +2884,7 @@ static void *sp_make_share_kva_to_task(unsigned long kva, unsigned long size, un
     struct sp_group *spg;
 
     down_write(&sp_group_sem);
-    ret = sp_mapping_group_setup_local(current->mm);
+    ret = sp_init_group_master_locked(current, current->mm);
     if (ret) {
         up_write(&sp_group_sem);
         pr_err_ratelimited("k2u_task init local mapping failed %d\n", ret);
@@ -2932,13 +2892,7 @@ static void *sp_make_share_kva_to_task(unsigned long kva, unsigned long size, un
     }
 
     spg = current->mm->sp_group_master->local;
-    stat = sp_init_process_stat(current, current->mm, spg);
-    if (IS_ERR(stat)) {
-        up_write(&sp_group_sem);
-        pr_err_ratelimited("k2u_task init process stat failed %lx\n",
-                PTR_ERR(stat));
-        return stat;
-    }
+    stat = find_spg_proc_stat(current->mm->sp_group_master->stat, current->tgid, spg->id);
     up_write(&sp_group_sem);
 
     spa = sp_alloc_area(size, sp_flags, spg, SPA_TYPE_K2TASK, current->tgid);
@@ -3959,7 +3913,7 @@ static void free_sp_proc_stat(struct sp_proc_stat *stat)
 }
 
 /* the caller make sure stat is not NULL */
-void sp_proc_stat_drop(struct sp_proc_stat *stat)
+static void sp_proc_stat_drop(struct sp_proc_stat *stat)
 {
     if (atomic_dec_and_test(&stat->use_count))
         free_sp_proc_stat(stat);