Commit 1811840c authored by Zhang Jian, committed by Yongqiang Liu

mm/sharepool: Fix invalid sharepool node id when using sp_alloc

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I54IL8
CVE: NA

-----------------------------

When a NUMA node id is passed to sp_alloc, the requested node is
sometimes ignored: if a memory policy is set, the policy overrides the
requested node with its own preferred one. Fix this by mbinding the
virtual address range to the desired NUMA node before populating it.
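
For reference (not part of the patch), the semantics the fix relies on are
those of mbind(2) with MPOL_BIND | MPOL_F_STATIC_NODES: future faults in the
bound range must allocate from the given node, regardless of the task's
default policy. A minimal userspace sketch, assuming a machine where NUMA
node 1 exists; the kernel-side sp_mbind() below does the equivalent through
__do_mbind():

/*
 * Illustrative sketch only: userspace analogue of the kernel-side
 * sp_mbind() added by this patch. Assumes NUMA node 1 exists.
 * Build with: gcc demo.c -lnuma
 */
#include <numaif.h>	/* mbind(), MPOL_BIND, MPOL_F_STATIC_NODES, MPOL_MF_STRICT */
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	unsigned long len = 2UL << 20;	/* 2 MiB */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Single-node mask: only bit 1 set, i.e. "node 1 and nothing else". */
	unsigned long nodemask = 1UL << 1;

	/*
	 * MPOL_F_STATIC_NODES keeps the mask literal even if the cpuset is
	 * rebound; MPOL_MF_STRICT reports pages already on other nodes.
	 * This mirrors the mode and flags sp_mbind() passes to __do_mbind().
	 */
	if (mbind(buf, len, MPOL_BIND | MPOL_F_STATIC_NODES,
		  &nodemask, 8 * sizeof(nodemask), MPOL_MF_STRICT)) {
		perror("mbind");
		return 1;
	}

	((char *)buf)[0] = 1;	/* first touch now faults pages on node 1 */
	return 0;
}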
Signed-off-by: Zhang Jian <zhangjian210@huawei.com>
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
Parent 1177aad1
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -219,6 +219,9 @@ static inline bool vma_migratable(struct vm_area_struct *vma)
 extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
 extern void mpol_put_task_policy(struct task_struct *);
 
+extern long __do_mbind(unsigned long start, unsigned long len,
+			unsigned short mode, unsigned short mode_flags,
+			nodemask_t *nmask, unsigned long flags, struct mm_struct *mm);
 #else
 
 struct mempolicy {};
@@ -322,5 +325,12 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
 static inline void mpol_put_task_policy(struct task_struct *task)
 {
 }
+
+static long __do_mbind(unsigned long start, unsigned long len,
+			unsigned short mode, unsigned short mode_flags,
+			nodemask_t *nmask, unsigned long flags, struct mm_struct *mm)
+{
+	return 0;
+}
 #endif /* CONFIG_NUMA */
 #endif
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1259,11 +1259,10 @@ static struct page *new_page(struct page *page, unsigned long start)
 }
 #endif
 
-static long do_mbind(unsigned long start, unsigned long len,
-		     unsigned short mode, unsigned short mode_flags,
-		     nodemask_t *nmask, unsigned long flags)
+long __do_mbind(unsigned long start, unsigned long len,
+		unsigned short mode, unsigned short mode_flags,
+		nodemask_t *nmask, unsigned long flags, struct mm_struct *mm)
 {
-	struct mm_struct *mm = current->mm;
 	struct mempolicy *new;
 	unsigned long end;
 	int err;
@@ -1364,6 +1363,13 @@ static long do_mbind(unsigned long start, unsigned long len,
 	return err;
 }
 
+static long do_mbind(unsigned long start, unsigned long len,
+		     unsigned short mode, unsigned short mode_flags,
+		     nodemask_t *nmask, unsigned long flags)
+{
+	return __do_mbind(start, len, mode, mode_flags, nmask, flags, current->mm);
+}
+
 /*
  * User space interface with variable sized bitmaps for nodelists.
  */
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -15,7 +15,6 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-
 #define pr_fmt(fmt) "share pool: " fmt
 
 #include <linux/share_pool.h>
@@ -2174,6 +2173,7 @@ struct sp_alloc_context {
 	bool need_fallocate;
 	struct timespec64 start;
 	struct timespec64 end;
+	bool have_mbind;
 };
 
 static void trace_sp_alloc_begin(struct sp_alloc_context *ac)
@@ -2316,6 +2316,7 @@ static int sp_alloc_prepare(unsigned long size, unsigned long sp_flags,
 	ac->sp_flags = sp_flags;
 	ac->state = ALLOC_NORMAL;
 	ac->need_fallocate = false;
+	ac->have_mbind = false;
 
 	return 0;
 }
@@ -2409,7 +2410,7 @@ static void sp_alloc_fallback(struct sp_area *spa, struct sp_alloc_context *ac)
 }
 
 static int sp_alloc_populate(struct mm_struct *mm, struct sp_area *spa,
-		struct sp_group_node *spg_node, struct sp_alloc_context *ac)
+		struct sp_alloc_context *ac)
 {
 	int ret = 0;
 	unsigned long sp_addr = spa->va_start;
@@ -2441,25 +2442,19 @@ static int sp_alloc_populate(struct mm_struct *mm, struct sp_area *spa,
 		if (ret)
 			sp_add_work_compact();
 	}
-	if (ret) {
-		if (spa->spg != spg_none)
-			sp_alloc_unmap(list_next_entry(spg_node, proc_node)->master->mm, spa, spg_node);
-		else
-			sp_munmap(mm, spa->va_start, spa->real_size);
-
-		if (unlikely(fatal_signal_pending(current)))
-			pr_warn_ratelimited("allocation failed, current thread is killed\n");
-		else
-			pr_warn_ratelimited("allocation failed due to mm populate failed"
-					    "(potential no enough memory when -12): %d\n", ret);
-		sp_fallocate(spa);	/* need this, otherwise memleak */
-		sp_alloc_fallback(spa, ac);
-	} else {
-		ac->need_fallocate = true;
-	}
 	return ret;
 }
 
+static long sp_mbind(struct mm_struct *mm, unsigned long start, unsigned long len,
+		     unsigned long node)
+{
+	nodemask_t nmask;
+
+	nodes_clear(nmask);
+	node_set(node, nmask);
+	return __do_mbind(start, len, MPOL_BIND, MPOL_F_STATIC_NODES,
+			&nmask, MPOL_MF_STRICT, mm);
+}
+
 static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa,
 		struct sp_group_node *spg_node, struct sp_alloc_context *ac)
 {
@@ -2475,7 +2470,34 @@ static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa,
 		return ret;
 	}
 
-	ret = sp_alloc_populate(mm, spa, spg_node, ac);
-
+	if (!ac->have_mbind) {
+		ret = sp_mbind(mm, spa->va_start, spa->real_size, spa->node_id);
+		if (ret < 0) {
+			pr_err("cannot bind the memory range to specified node:%d, err:%d\n",
+			       spa->node_id, ret);
+			goto err;
+		}
+		ac->have_mbind = true;
+	}
+
+	ret = sp_alloc_populate(mm, spa, ac);
+	if (ret) {
+err:
+		if (spa->spg != spg_none)
+			sp_alloc_unmap(list_next_entry(spg_node, proc_node)->master->mm, spa, spg_node);
+		else
+			sp_munmap(mm, spa->va_start, spa->real_size);
+
+		if (unlikely(fatal_signal_pending(current)))
+			pr_warn_ratelimited("allocation failed, current thread is killed\n");
+		else
+			pr_warn_ratelimited("allocation failed due to mm populate failed(potential no enough memory when -12): %d\n",
+					    ret);
+		sp_fallocate(spa);	/* need this, otherwise memleak */
+
+		sp_alloc_fallback(spa, ac);
+	} else
+		ac->need_fallocate = true;
+
 	return ret;
 }
@@ -2497,11 +2519,6 @@ static int sp_alloc_mmap_populate(struct sp_area *spa,
 		if (mmap_ret) {
 			if (ac->state != ALLOC_COREDUMP)
 				return mmap_ret;
-			if (ac->spg == spg_none) {
-				sp_alloc_unmap(mm, spa, spg_node);
-				pr_err("dvpp allocation failed due to coredump");
-				return mmap_ret;
-			}
 			ac->state = ALLOC_NORMAL;
 			continue;
 		}