Commit 447951e3 authored by Wang Wensheng, committed by Zheng Zengkai

mm/sharepool: Unify the memory allocation process

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5DS9S
CVE: NA

-------------------------------------------------

There are two types of memory allocated from sharepool: passthrough
memory for DVPP and shared memory. Currently, we branch to different
routines depending on the memory type, both in the allocation and in
the free process. Since we have already created a local group for
passthrough memory, one more step lets us drop the redundant branches
in the allocation and free paths, as well as in all the fallback paths
taken when an error occurs.

Here is the content of this patch:
1. Add every process to its local group when initializing its group_master.
2. Avoid returning the local group in find_sp_group_id_by_pid().
3. Delete the redundant branches in the allocation and free paths
   (a rough conceptual sketch of the unified path follows this list).
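
As a rough conceptual sketch (plain userspace C, not kernel code; every
name below is made up and only illustrates the idea), keeping each
process in its own local group means the passthrough (DVPP) path can
reuse the same walk-the-group-members loop as the shared path, so the
type-specific branches disappear:

/*
 * Illustrative userspace sketch only -- not kernel code, all names are
 * hypothetical. It models the point of this patch: once every process
 * is a member of its own local group, freeing passthrough memory can
 * use the same "walk all group members" loop as shared memory, so no
 * branch on the memory type is needed.
 */
#include <stdio.h>

struct sketch_member { struct sketch_member *next; int pid; };
struct sketch_group  { struct sketch_member *procs; int id; };

/* unified free path: one loop, no "is this the local group?" branch */
static void sketch_unmap_all(struct sketch_group *g)
{
	struct sketch_member *m;

	for (m = g->procs; m; m = m->next)
		printf("unmap range in pid %d (group %d)\n", m->pid, g->id);
}

int main(void)
{
	/* the local group is created when the group_master is initialized,
	 * and the owning process is added to it right away */
	struct sketch_member self = { NULL, 42 };
	struct sketch_group local = { &self, 100000 };

	/* passthrough free takes the same path; the loop just sees one member */
	sketch_unmap_all(&local);
	return 0;
}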
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 331c3817
@@ -249,13 +249,15 @@ static int sp_mapping_group_setup(struct mm_struct *mm, struct sp_group *spg)
 	return 0;
 }
 
+static void free_sp_group_locked(struct sp_group *spg);
+static int local_group_add_task(struct mm_struct *mm, struct sp_group *spg);
 static struct sp_group *create_spg(int spg_id);
 static void free_new_spg_id(bool new, int spg_id);
 
 /* The caller must hold sp_group_sem */
 static struct sp_group_master *sp_init_group_master_locked(
 		struct mm_struct *mm, bool *exist)
 {
-	int spg_id;
+	int spg_id, ret;
 	struct sp_group *spg;
 	struct sp_group_master *master = mm->sp_group_master;
@@ -271,16 +273,15 @@ static struct sp_group_master *sp_init_group_master_locked(
 	spg_id = ida_alloc_range(&sp_group_id_ida, SPG_ID_LOCAL_MIN,
 				 SPG_ID_LOCAL_MAX, GFP_ATOMIC);
 	if (spg_id < 0) {
-		kfree(master);
 		pr_err_ratelimited("generate local group id failed %d\n", spg_id);
-		return ERR_PTR(spg_id);
+		ret = spg_id;
+		goto free_master;
 	}
 
 	spg = create_spg(spg_id);
 	if (IS_ERR(spg)) {
-		free_new_spg_id(true, spg_id);
-		kfree(master);
-		return (struct sp_group_master *)spg;
+		ret = PTR_ERR(spg);
+		goto free_spg_id;
 	}
 
 	INIT_LIST_HEAD(&master->node_list);
@@ -290,8 +291,20 @@ static struct sp_group_master *sp_init_group_master_locked(
 	master->local = spg;
 	mm->sp_group_master = master;
 
+	ret = local_group_add_task(mm, spg);
+	if (ret < 0)
+		goto free_spg;
+
 	*exist = false;
 	return master;
+
+free_spg:
+	free_sp_group_locked(spg);
+free_spg_id:
+	free_new_spg_id(true, spg_id);
+free_master:
+	kfree(master);
+	return ERR_PTR(ret);
 }
 
 static inline bool is_local_group(int spg_id)
@@ -670,6 +683,8 @@ static struct sp_overall_stat sp_overall_stat;
 
 enum spa_type {
 	SPA_TYPE_ALLOC = 1,
+	/* NOTE: reorganize after the statisical structure is reconstructed. */
+	SPA_TYPE_ALLOC_PRIVATE = SPA_TYPE_ALLOC,
 	SPA_TYPE_K2TASK,
 	SPA_TYPE_K2SPG,
 };
@@ -1037,7 +1052,7 @@ EXPORT_SYMBOL_GPL(sp_group_id_by_pid);
  */
 int mg_sp_group_id_by_pid(int pid, int *spg_ids, int *num)
 {
-	int ret = 0;
+	int ret = 0, real_count;
 	struct sp_group_node *node;
 	struct sp_group_master *master = NULL;
 	struct task_struct *tsk;
@@ -1062,18 +1077,28 @@ int mg_sp_group_id_by_pid(int pid, int *spg_ids, int *num)
 		goto out_up_read;
 	}
 
-	if (!master->count) {
+	/*
+	 * There is a local group for each process which is used for
+	 * passthrough allocation. The local group is a internal
+	 * implementation for convenience and is not attempt to bother
+	 * the user.
+	 */
+	real_count = master->count - 1;
+	if (real_count <= 0) {
 		ret = -ENODEV;
 		goto out_up_read;
 	}
-	if ((unsigned int)*num < master->count) {
+	if ((unsigned int)*num < real_count) {
 		ret = -E2BIG;
 		goto out_up_read;
 	}
-	*num = master->count;
+	*num = real_count;
 
-	list_for_each_entry(node, &master->node_list, group_node)
+	list_for_each_entry(node, &master->node_list, group_node) {
+		if (is_local_group(node->spg->id))
+			continue;
 		*(spg_ids++) = node->spg->id;
+	}
 
 out_up_read:
 	up_read(&sp_group_sem);
@@ -1245,7 +1270,7 @@ static int mm_add_group_init(struct mm_struct *mm, struct sp_group *spg)
 		return -EEXIST;
 	}
 
-	if (master->count + 1 == MAX_GROUP_FOR_TASK) {
+	if (master->count == MAX_GROUP_FOR_TASK) {
 		pr_err("task reaches max group num\n");
 		return -ENOSPC;
 	}
@@ -1289,6 +1314,29 @@ static int insert_spg_node(struct sp_group *spg, struct sp_group_node *node)
 	return 0;
 }
 
+static int local_group_add_task(struct mm_struct *mm, struct sp_group *spg)
+{
+	struct sp_group_node *node;
+	struct spg_proc_stat *stat;
+
+	node = create_spg_node(mm, PROT_READ | PROT_WRITE, spg);
+	if (IS_ERR(node))
+		return PTR_ERR(node);
+
+	/* use current just to avoid compile error, rebuild in following patch */
+	stat = sp_init_process_stat(current, mm, spg);
+	if (IS_ERR(stat)) {
+		free_sp_group_locked(spg);
+		pr_err_ratelimited("init process stat failed %lx\n", PTR_ERR(stat));
+		return PTR_ERR(stat);
+	}
+
+	insert_spg_node(spg, node);
+	mmget(mm);
+
+	return 0;
+}
+
 /* the caller must down_write(&spg->rw_lock) */
 static void delete_spg_node(struct sp_group *spg, struct sp_group_node *node)
 {
@@ -2160,15 +2208,10 @@ static void sp_fallocate(struct sp_area *spa)
 
 static void sp_free_unmap_fallocate(struct sp_area *spa)
 {
-	if (!is_local_group(spa->spg->id)) {
-		down_read(&spa->spg->rw_lock);
-		__sp_free(spa->spg, spa->va_start, spa_size(spa), NULL);
-		sp_fallocate(spa);
-		up_read(&spa->spg->rw_lock);
-	} else {
-		sp_munmap(current->mm, spa->va_start, spa_size(spa));
-		sp_fallocate(spa);
-	}
+	down_read(&spa->spg->rw_lock);
+	__sp_free(spa->spg, spa->va_start, spa_size(spa), NULL);
+	sp_fallocate(spa);
+	up_read(&spa->spg->rw_lock);
 }
 
 static int sp_check_caller_permission(struct sp_group *spg, struct mm_struct *mm)
@@ -2176,9 +2219,10 @@ static int sp_check_caller_permission(struct sp_group *spg, struct mm_struct *mm
 	int ret = 0;
 
 	down_read(&spg->rw_lock);
-	if (!is_local_group(spg->id) && !is_process_in_group(spg, mm))
+	if (!is_process_in_group(spg, mm))
 		ret = -EPERM;
 	up_read(&spg->rw_lock);
+
 	return ret;
 }
 
@@ -2363,6 +2407,7 @@ struct sp_alloc_context {
 	struct timespec64 start;
 	struct timespec64 end;
 	bool have_mbind;
+	enum spa_type type;
 };
 
 static void trace_sp_alloc_begin(struct sp_alloc_context *ac)
@@ -2450,10 +2495,13 @@ static int sp_alloc_prepare(unsigned long size, unsigned long sp_flags,
 			pr_err_ratelimited("allocation failed, task not in group\n");
 			return -ENODEV;
 		}
+		ac->type = SPA_TYPE_ALLOC;
 	} else { /* allocation pass through scene */
 		spg = sp_get_local_group(current->mm);
 		if (IS_ERR(spg))
 			return PTR_ERR(spg);
+		down_read(&spg->rw_lock);
+		ac->type = SPA_TYPE_ALLOC_PRIVATE;
 	}
 
 	if (sp_flags & SP_HUGEPAGE) {
@@ -2476,8 +2524,7 @@ static int sp_alloc_prepare(unsigned long size, unsigned long sp_flags,
 static void sp_alloc_unmap(struct mm_struct *mm, struct sp_area *spa,
 		struct sp_group_node *spg_node)
 {
-	if (!is_local_group(spa->spg->id))
-		__sp_free(spa->spg, spa->va_start, spa->real_size, mm);
+	__sp_free(spa->spg, spa->va_start, spa->real_size, mm);
 }
 
 static int sp_alloc_mmap(struct mm_struct *mm, struct sp_area *spa,
@@ -2532,10 +2579,7 @@ static int sp_alloc_mmap(struct mm_struct *mm, struct sp_area *spa,
 	return ret;
 
 unmap:
-	if (!is_local_group(spa->spg->id))
-		sp_alloc_unmap(list_next_entry(spg_node, proc_node)->master->mm, spa, spg_node);
-	else
-		sp_munmap(mm, spa->va_start, spa->real_size);
+	sp_alloc_unmap(list_next_entry(spg_node, proc_node)->master->mm, spa, spg_node);
 	return ret;
 }
 
@@ -2635,10 +2679,7 @@ static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa,
 	ret = sp_alloc_populate(mm, spa, ac);
 	if (ret) {
 err:
-		if (!is_local_group(spa->spg->id))
-			sp_alloc_unmap(list_next_entry(spg_node, proc_node)->master->mm, spa, spg_node);
-		else
-			sp_munmap(mm, spa->va_start, spa->real_size);
+		sp_alloc_unmap(list_next_entry(spg_node, proc_node)->master->mm, spa, spg_node);
 
 		if (unlikely(fatal_signal_pending(current)))
 			pr_warn_ratelimited("allocation failed, current thread is killed\n");
@@ -2661,34 +2702,30 @@ static int sp_alloc_mmap_populate(struct sp_area *spa,
 	struct mm_struct *mm;
 	struct sp_group_node *spg_node;
 
-	if (is_local_group(spa->spg->id)) {
-		ret = __sp_alloc_mmap_populate(current->mm, spa, NULL, ac);
-	} else {
-		/* create mapping for each process in the group */
-		list_for_each_entry(spg_node, &spa->spg->procs, proc_node) {
-			mm = spg_node->master->mm;
-			mmap_ret = __sp_alloc_mmap_populate(mm, spa, spg_node, ac);
-			if (mmap_ret) {
-				if (ac->state != ALLOC_COREDUMP)
-					return mmap_ret;
-				ac->state = ALLOC_NORMAL;
-				continue;
-			}
-			ret = mmap_ret;
+	/* create mapping for each process in the group */
+	list_for_each_entry(spg_node, &spa->spg->procs, proc_node) {
+		mm = spg_node->master->mm;
+		mmap_ret = __sp_alloc_mmap_populate(mm, spa, spg_node, ac);
+		if (mmap_ret) {
+			if (ac->state != ALLOC_COREDUMP)
+				return mmap_ret;
			ac->state = ALLOC_NORMAL;
+			continue;
 		}
+		ret = mmap_ret;
 	}
 
 	return ret;
 }
 
 /* spa maybe an error pointer, so introduce variable spg */
 static void sp_alloc_finish(int result, struct sp_area *spa,
 		struct sp_alloc_context *ac)
 {
 	struct sp_group *spg = ac->spg;
 
 	/* match sp_alloc_prepare */
-	if (!is_local_group(spg->id))
-		up_read(&spg->rw_lock);
+	up_read(&spg->rw_lock);
+
 	if (!result)
 		sp_update_process_stat(current, true, spa);
@@ -2728,7 +2765,7 @@ void *sp_alloc(unsigned long size, unsigned long sp_flags, int spg_id)
 
 try_again:
 	spa = sp_alloc_area(ac.size_aligned, ac.sp_flags, ac.spg,
-			SPA_TYPE_ALLOC, current->tgid);
+			ac.type, current->tgid);
 	if (IS_ERR(spa)) {
 		pr_err_ratelimited("alloc spa failed in allocation(potential no enough virtual memory when -75): %ld\n",
 			PTR_ERR(spa));
@@ -4650,8 +4687,6 @@ void sp_group_post_exit(struct mm_struct *mm)
 	}
 	up_write(&sp_group_sem);
 
-	if (master->local)
-		sp_group_drop(master->local);
 	kfree(master);
 }
 
......