Commit 08ab8789 authored by Zhou Guanghui, committed by Yang Yingliang

ascend: share pool: Only memory of current process is allowed to u2k/k2u

ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4EUVI
CVE: NA
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent affabe05
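This patch removes the find_task_by_vpid()/get_task_mm() lookup from both interfaces: sp_make_share_k2u() and sp_make_share_u2k() now operate only on current->mm, reject kernel threads (mm == NULL) with -EPERM, and leave the pid parameter unused. As a rough illustration of the caller side, here is a minimal, hypothetical sketch (not part of this patch); it assumes the sp_make_share_k2u() prototype and the SPG_ID_NONE constant from the share pool header:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/share_pool.h>
#include <linux/vmalloc.h>

/*
 * Hypothetical example only. Share a vmalloc'ed kernel buffer into the
 * current process. After this patch the pid argument no longer selects
 * a target task (the mapping always lands in current->mm), and calling
 * from a kthread fails with -EPERM.
 */
static void *demo_k2u_share(unsigned long size)
{
	void *kva = vmalloc(PAGE_ALIGN(size));
	void *uva;

	if (!kva)
		return ERR_PTR(-ENOMEM);

	/* SPG_ID_NONE: share to the current task, not to an sp_group */
	uva = sp_make_share_k2u((unsigned long)kva, PAGE_ALIGN(size),
				0 /* sp_flags */, current->tgid, SPG_ID_NONE);
	if (IS_ERR(uva))
		vfree(kva);
	return uva;
}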
@@ -2220,15 +2220,17 @@ static bool vmalloc_area_clr_flag(struct sp_area *spa, unsigned long kva, unsign
 }
 
 /**
- * sp_make_share_k2u() - Share kernel memory to a specified process or sp_group.
+ * sp_make_share_k2u() - Share kernel memory to current process or an sp_group.
  * @kva: the VA of shared kernel memory.
  * @size: the size of shared kernel memory.
  * @sp_flags: how to allocate the memory. We only support SP_DVPP.
- * @pid: the pid of the specified process
+ * @pid: the pid of the specified process (Not currently in use).
  * @spg_id: the share group that the memory is shared to.
  *
- * Use spg_id of current thread if spg_id == SPG_ID_DEFAULT.
- * Share kernel memory to a specified task if spg_id == SPG_ID_NONE.
+ * Return: the shared target user address to start at
+ *
+ * Share kernel memory to current task if spg_id == SPG_ID_NONE
+ * or SPG_ID_DEFAULT in multi-group mode.
  *
  * Return:
  * * if succeed, return the shared user address to start at.
@@ -2243,9 +2245,8 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size,
 	unsigned long kva_aligned;
 	unsigned long size_aligned;
 	unsigned int page_size = PAGE_SIZE;
-	struct task_struct *tsk;
-	struct mm_struct *mm;
-	int ret = 0, is_hugepage;
+	struct mm_struct *mm = current->mm;
+	int is_hugepage;
 
 	check_interrupt_context();
 
@@ -2254,6 +2255,11 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size,
 		return ERR_PTR(-EINVAL);
 	}
 
+	if (mm == NULL) {
+		pr_err_ratelimited("k2u: kthread is not allowed\n");
+		return ERR_PTR(-EPERM);
+	}
+
 	is_hugepage = is_vmap_hugepage(kva);
 	if (is_hugepage > 0) {
 		sp_flags |= SP_HUGEPAGE;
@@ -2269,50 +2275,30 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size,
 	kva_aligned = ALIGN_DOWN(kva, page_size);
 	size_aligned = ALIGN(kva + size, page_size) - kva_aligned;
 
-	rcu_read_lock();
-	tsk = find_task_by_vpid(pid);
-	if (!tsk || (tsk->flags & PF_EXITING))
-		ret = -ESRCH;
-	else
-		get_task_struct(tsk);
-	rcu_read_unlock();
-	if (ret)
-		return ERR_PTR(ret);
-
-	mm = get_task_mm(tsk);
-	if (mm == NULL) {
-		uva = ERR_PTR(-ESRCH);
-		goto out_put_task;
-	}
-
-	spg = __sp_find_spg(pid, SPG_ID_DEFAULT);
+	spg = get_first_group(mm);
 	if (spg == NULL) {
 		/* k2u to task */
 		struct spg_proc_stat *stat;
 
 		if (spg_id != SPG_ID_NONE && spg_id != SPG_ID_DEFAULT) {
 			pr_err_ratelimited("share pool: k2task invalid spg id %d\n", spg_id);
-			uva = ERR_PTR(-EINVAL);
-			goto out_put_mm;
+			return ERR_PTR(-EINVAL);
 		}
 
 		down_write(&sp_group_sem);
-		stat = sp_init_process_stat(tsk, mm, spg_none);
+		stat = sp_init_process_stat(current, mm, spg_none);
 		up_write(&sp_group_sem);
 		if (IS_ERR(stat)) {
-			uva = stat;
 			pr_err_ratelimited("share pool: k2u(task) init process stat failed, ret %lx\n",
 					   PTR_ERR(stat));
-			goto out_put_mm;
+			return stat;
 		}
 
-		spa = sp_alloc_area(size_aligned, sp_flags, spg_none, SPA_TYPE_K2TASK, tsk->tgid);
+		spa = sp_alloc_area(size_aligned, sp_flags, spg_none, SPA_TYPE_K2TASK, current->tgid);
 		if (IS_ERR(spa)) {
 			pr_err_ratelimited("share pool: k2u(task) failed due to alloc spa failure "
 					   "(potential no enough virtual memory when -75): %ld\n", PTR_ERR(spa));
-			uva = spa;
-			goto out_put_mm;
+			return spa;
 		}
 
 		if (!vmalloc_area_set_flag(spa, kva_aligned, VM_SHAREPOOL)) {
@@ -2340,9 +2326,9 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size,
 	}
 
 	if (enable_share_k2u_spg)
-		spa = sp_alloc_area(size_aligned, sp_flags, spg, SPA_TYPE_K2SPG, tsk->tgid);
+		spa = sp_alloc_area(size_aligned, sp_flags, spg, SPA_TYPE_K2SPG, current->tgid);
 	else
-		spa = sp_alloc_area(size_aligned, sp_flags, spg_none, SPA_TYPE_K2TASK, tsk->tgid);
+		spa = sp_alloc_area(size_aligned, sp_flags, spg_none, SPA_TYPE_K2TASK, current->tgid);
 
 	if (IS_ERR(spa)) {
 		up_read(&spg->rw_lock);
@@ -2370,7 +2356,7 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size,
 	up_read(&spg->rw_lock);
 
 	if (!IS_ERR(uva))
-		sp_update_process_stat(tsk, true, spa);
+		sp_update_process_stat(current, true, spa);
 
 finish:
 	if (!IS_ERR(uva)) {
@@ -2387,10 +2373,6 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size,
 out_drop_spg:
 	if (spg)
 		sp_group_drop(spg);
-out_put_mm:
-	mmput(mm);
-out_put_task:
-	put_task_struct(tsk);
 
 	sp_dump_stack();
 	return uva;
@@ -2605,7 +2587,7 @@ static void __sp_walk_page_free(struct sp_walk_data *data)
 * sp_make_share_u2k() - Share user memory of a specified process to kernel.
  * @uva: the VA of shared user memory
  * @size: the size of shared user memory
- * @pid: the pid of the specified process
+ * @pid: the pid of the specified process(Not currently in use)
  *
  * Return:
  * * if success, return the starting kernel address of the shared memory.
@@ -2614,8 +2596,7 @@ static void __sp_walk_page_free(struct sp_walk_data *data)
 void *sp_make_share_u2k(unsigned long uva, unsigned long size, int pid)
 {
 	int ret = 0;
-	struct task_struct *tsk;
-	struct mm_struct *mm;
+	struct mm_struct *mm = current->mm;
 	void *p = ERR_PTR(-ESRCH);
 	struct sp_walk_data sp_walk_data = {
 		.page_count = 0,
@@ -2624,34 +2605,23 @@ void *sp_make_share_u2k(unsigned long uva, unsigned long size, int pid)
 	check_interrupt_context();
 
-	rcu_read_lock();
-	tsk = find_task_by_vpid(pid);
-	if (!tsk || (tsk->flags & PF_EXITING))
-		ret = -ESRCH;
-	else
-		get_task_struct(tsk);
-	rcu_read_unlock();
-	if (ret)
-		goto out;
-
-	mm = get_task_mm(tsk);
-	if (mm == NULL)
-		goto out_put_task;
+	if (mm == NULL) {
+		pr_err("u2k: kthread is not allowed\n");
+		return ERR_PTR(-EPERM);
+	}
 
 	down_write(&mm->mmap_sem);
 	if (unlikely(mm->core_state)) {
 		up_write(&mm->mmap_sem);
 		pr_err("share pool: u2k: encountered coredump, abort\n");
-		mmput(mm);
-		goto out_put_task;
+		return p;
 	}
 
 	ret = __sp_walk_page_range(uva, size, mm, &sp_walk_data);
 	if (ret) {
 		pr_err_ratelimited("share pool: walk page range failed, ret %d\n", ret);
 		up_write(&mm->mmap_sem);
-		mmput(mm);
-		p = ERR_PTR(ret);
-		goto out_put_task;
+		return ERR_PTR(ret);
 	}
 
 	if (sp_walk_data.is_hugepage)
@@ -2661,17 +2631,15 @@ void *sp_make_share_u2k(unsigned long uva, unsigned long size, int pid)
 		p = vmap(sp_walk_data.pages, sp_walk_data.page_count, VM_MAP,
 			 PAGE_KERNEL);
 	up_write(&mm->mmap_sem);
-	mmput(mm);
 
 	if (!p) {
 		pr_err("share pool: vmap(huge) in u2k failed\n");
 		__sp_walk_page_free(&sp_walk_data);
-		p = ERR_PTR(-ENOMEM);
-		goto out_put_task;
-	} else {
-		p = p + (uva - sp_walk_data.uva_aligned);
+		return ERR_PTR(-ENOMEM);
 	}
 
+	p = p + (uva - sp_walk_data.uva_aligned);
+
 	/*
 	 * kva p may be used later in k2u. Since p comes from uva originally,
 	 * it's reasonable to add flag VM_USERMAP so that p can be remapped
@@ -2681,9 +2649,6 @@ void *sp_make_share_u2k(unsigned long uva, unsigned long size, int pid)
 	area->flags |= VM_USERMAP;
 
 	kvfree(sp_walk_data.pages);
-out_put_task:
-	put_task_struct(tsk);
-out:
 	return p;
 }
 EXPORT_SYMBOL_GPL(sp_make_share_u2k);
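For the u2k direction the exported signature keeps the pid parameter, but the page walk now always runs on current->mm, so the caller must be the process that owns the mapping. A minimal, hypothetical sketch under the same assumptions:

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/share_pool.h>

/*
 * Hypothetical example only. Map a user buffer of the current process
 * into kernel space; pid is retained in the signature but ignored.
 */
static void *demo_u2k_share(unsigned long uva, unsigned long size)
{
	void *kva = sp_make_share_u2k(uva, size, current->tgid);

	if (IS_ERR(kva))
		pr_err("demo: u2k failed: %ld\n", PTR_ERR(kva));
	return kva;
}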