Commit 4fc8fa79 authored by Wang Wensheng, committed by Yongqiang Liu

mm/sharepool: Use vm_private_data to store the spa

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5EORS
CVE: NA

--------------------------------

When we destroy a vma, we currently look up the spa by
vma->vm_start, which requires holding sp_area_lock. If we instead
store the spa in the vma's vm_private_data, we can get the spa
directly. There is no need to worry about whether the spa still
exists or is about to be freed, since we increased its refcount
when it was mapped into the vma.
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
Parent 79c93cdf
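In essence, the patch trades a by-address lookup under sp_area_lock for a direct
pointer read: sp_mmap() now stashes the spa in vma->vm_private_data right after
the mapping is created, and the later users (sp_area_drop(), sp_node_id(),
sharepool_no_page()) read it back. The sketch below illustrates that pattern only;
sp_attach_spa() and sp_vma_to_spa() are hypothetical helper names that do not
appear in the patch, whose real changes are in the diff that follows.

#include <linux/mm_types.h>	/* struct vm_area_struct */

struct sp_area;			/* opaque here; defined in mm/share_pool.c */

/*
 * Illustrative helpers only (not part of the patch): stash the spa in the
 * vma when the mapping is created, then read it back directly instead of
 * searching by vma->vm_start under sp_area_lock.
 */
static inline void sp_attach_spa(struct vm_area_struct *vma, struct sp_area *spa)
{
	/* The spa refcount was raised when it was mapped into this vma,
	 * so the stored pointer stays valid until the vma itself goes away. */
	vma->vm_private_data = spa;
}

static inline struct sp_area *sp_vma_to_spa(struct vm_area_struct *vma)
{
	return vma->vm_private_data;	/* direct read; no lookup, no lock */
}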
@@ -875,7 +875,7 @@ static inline bool check_aoscore_process(struct task_struct *tsk)
 
 static unsigned long sp_mmap(struct mm_struct *mm, struct file *file,
 			     struct sp_area *spa, unsigned long *populate,
-			     unsigned long prot);
+			     unsigned long prot, struct vm_area_struct **pvma);
 static void sp_munmap(struct mm_struct *mm, unsigned long addr, unsigned long size);
 
 #define K2U_NORMAL	0
@@ -1527,7 +1527,7 @@ int mg_sp_group_add_task(int pid, unsigned long prot, int spg_id)
 			break;
 		}
 
-		addr = sp_mmap(mm, file, spa, &populate, prot);
+		addr = sp_mmap(mm, file, spa, &populate, prot, NULL);
 		if (IS_ERR_VALUE(addr)) {
 			sp_munmap_task_areas(mm, spg, &spa->link);
 			up_write(&mm->mmap_sem);
@@ -2059,8 +2059,6 @@ static void __sp_area_drop(struct sp_area *spa)
 
 void sp_area_drop(struct vm_area_struct *vma)
 {
-	struct sp_area *spa;
-
 	if (!(vma->vm_flags & VM_SHARE_POOL))
 		return;
 
@@ -2072,9 +2070,7 @@ void sp_area_drop(struct vm_area_struct *vma)
 	 * an atomic operation.
 	 */
 	spin_lock(&sp_area_lock);
-	spa = __find_sp_area_locked(vma->vm_mm->sp_group_master->local,
-				    vma->vm_start);
-	__sp_area_drop_locked(spa);
+	__sp_area_drop_locked(vma->vm_private_data);
 	spin_unlock(&sp_area_lock);
 }
 
@@ -2342,7 +2338,7 @@ EXPORT_SYMBOL_GPL(mg_sp_free);
 /* wrapper of __do_mmap() and the caller must hold down_write(&mm->mmap_sem). */
 static unsigned long sp_mmap(struct mm_struct *mm, struct file *file,
 			     struct sp_area *spa, unsigned long *populate,
-			     unsigned long prot)
+			     unsigned long prot, struct vm_area_struct **pvma)
 {
 	unsigned long addr = spa->va_start;
 	unsigned long size = spa_size(spa);
@@ -2350,6 +2346,7 @@ static unsigned long sp_mmap(struct mm_struct *mm, struct file *file,
 			  MAP_SHARE_POOL;
 	unsigned long vm_flags = VM_NORESERVE | VM_SHARE_POOL | VM_DONTCOPY;
 	unsigned long pgoff = addr_offset(spa) >> PAGE_SHIFT;
+	struct vm_area_struct *vma;
 
 	/* Mark the mapped region to be locked. After the MAP_LOCKED is enable,
 	 * multiple tasks will preempt resources, causing performance loss.
@@ -2365,8 +2362,13 @@ static unsigned long sp_mmap(struct mm_struct *mm, struct file *file,
 		pr_err("do_mmap fails %ld\n", addr);
 	} else {
 		BUG_ON(addr != spa->va_start);
+
+		vma = find_vma(mm, addr);
+		vma->vm_private_data = spa;
+		if (pvma)
+			*pvma = vma;
 	}
 
 	return addr;
 }
@@ -2512,7 +2514,6 @@ static int sp_alloc_mmap(struct mm_struct *mm, struct sp_area *spa,
 	unsigned long mmap_addr;
 	/* pass through default permission */
 	unsigned long prot = PROT_READ | PROT_WRITE;
-	unsigned long sp_addr = spa->va_start;
 	unsigned long populate = 0;
 	struct vm_area_struct *vma;
 
@@ -2531,7 +2532,7 @@ static int sp_alloc_mmap(struct mm_struct *mm, struct sp_area *spa,
 		prot = PROT_READ;
 
 	/* when success, mmap_addr == spa->va_start */
-	mmap_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot);
+	mmap_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot, &vma);
 	if (IS_ERR_VALUE(mmap_addr)) {
 		up_write(&mm->mmap_sem);
 		sp_alloc_unmap(mm, spa, spg_node);
@@ -2547,14 +2548,6 @@ static int sp_alloc_mmap(struct mm_struct *mm, struct sp_area *spa,
 	}
 
 	ac->populate = populate;
 
-	vma = find_vma(mm, sp_addr);
-	if (unlikely(!vma)) {
-		up_write(&mm->mmap_sem);
-		WARN(1, "allocation failed, can't find %lx vma\n", sp_addr);
-		ret = -EINVAL;
-		goto unmap;
-	}
-
 	if (ac->sp_flags & SP_PROT_RO)
 		vma->vm_flags &= ~VM_MAYWRITE;
@@ -2851,15 +2844,12 @@ static unsigned long sp_remap_kva_to_vma(unsigned long kva, struct sp_area *spa,
 	if (kc && kc->sp_flags & SP_PROT_RO)
 		prot = PROT_READ;
 
-	ret_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot);
+	ret_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot, &vma);
 	if (IS_ERR_VALUE(ret_addr)) {
 		pr_debug("k2u mmap failed %lx\n", ret_addr);
 		goto put_mm;
 	}
-	BUG_ON(ret_addr != spa->va_start);
-	vma = find_vma(mm, ret_addr);
-	BUG_ON(vma == NULL);
 
 	if (prot & PROT_WRITE)
 		vma->vm_page_prot = __pgprot(((~PTE_RDONLY) & vma->vm_page_prot.pgprot) | PTE_DIRTY);
@@ -3884,13 +3874,9 @@ int sp_node_id(struct vm_area_struct *vma)
 	if (!enable_ascend_share_pool)
 		return node_id;
 
-	if (vma && vma->vm_flags & VM_SHARE_POOL) {
-		spa = __find_sp_area(vma->vm_mm->sp_group_master->local,
-				     vma->vm_start);
-		if (spa) {
-			node_id = spa->node_id;
-			__sp_area_drop(spa);
-		}
+	if (vma && vma->vm_flags & VM_SHARE_POOL && vma->vm_private_data) {
+		spa = vma->vm_private_data;
+		node_id = spa->node_id;
 	}
 
 	return node_id;
@@ -4472,13 +4458,12 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
 	int node_id;
 	struct sp_area *spa;
 
-	spa = __find_sp_area(mm->sp_group_master->local, vma->vm_start);
+	spa = vma->vm_private_data;
 	if (!spa) {
 		pr_err("share pool: vma is invalid, not from sp mmap\n");
 		return ret;
 	}
 	node_id = spa->node_id;
-	__sp_area_drop(spa);
 
 retry:
 	page = find_lock_page(mapping, idx);