Commit dec0a0c0 authored by Peng Wu, committed by Yang Yingliang

share_pool: Alloc sp memory on a specified memory node

ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4EUVI
CVE: NA

-------------------------------------------

Add a function for getting the memory node id, which can be used to
allocate share pool memory on a specified memory node.
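
For illustration, a caller that wants its allocation backed by device 1
would encode the device id into sp_flags using the DEVICE_ID_SHIFT and
DEVICE_ID_MASK layout introduced below. This is only a sketch: the wrapper
function and the 2M size are assumptions for the example, while sp_alloc(),
the flag bits, and SPG_ID_DEFAULT come from the share pool interface.

    #include <linux/share_pool.h>
    #include <linux/sizes.h>

    /* sketch: request 2M of share-pool memory backed by device/node 1
     * (valid ids are 0..MAX_DEVID-1, i.e. 0 and 1 after this patch)
     */
    static void *sp_alloc_on_device1(void)
    {
            unsigned long sp_flags = SP_HUGEPAGE | SP_DVPP |
                    ((1UL & DEVICE_ID_MASK) << DEVICE_ID_SHIFT);

            return sp_alloc(SZ_2M, sp_flags, SPG_ID_DEFAULT);
    }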
Signed-off-by: Peng Wu <wupeng58@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Reviewed-by: chenweilong <chenweilong@huawei.com>
Reviewed-by: Tang Yizhou <tangyizhou@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 34116d14
--- a/include/linux/share_pool.h
+++ b/include/linux/share_pool.h
@@ -10,6 +10,10 @@
 #define SP_HUGEPAGE		(1 << 0)
 #define SP_HUGEPAGE_ONLY	(1 << 1)
 #define SP_DVPP			(1 << 2)
+#define DEVICE_ID_MASK		0x3ff
+#define DEVICE_ID_SHIFT		32
+#define SP_FLAG_MASK		(SP_HUGEPAGE | SP_HUGEPAGE_ONLY | SP_DVPP | \
+				(_AC(DEVICE_ID_MASK, UL) << DEVICE_ID_SHIFT))
 
 #define SPG_ID_NONE	-1	/* not associated with sp_group, only for specified thread */
 #define SPG_ID_DEFAULT	0	/* use the spg id of current thread */
@@ -22,7 +26,7 @@
 #define SPG_ID_DVPP_PASS_THROUGH_MAX	899999
 #define SPG_ID_DVPP_PASS_THROUGH	900000
 
-#define MAX_DEVID 1	/* the max num of Da-vinci devices */
+#define MAX_DEVID 2	/* the max num of Da-vinci devices */
 
 /* to align the pointer to the (next) PMD boundary */
 #define PMD_ALIGN(addr)		ALIGN(addr, PMD_SIZE)
@@ -54,9 +58,9 @@ extern bool vmap_allow_huge;
  * |-------------------- 8T -------------------|---|------ 8T ------------|
  * |        Device 0        |    Device 1      |...|                      |
  * |----------------------------------------------------------------------|
- * |- 16G -|- 16G -|- 16G -|- 16G -|           |   |                      |
+ * |------------- 16G -------------|    16G    |   |                      |
  * | DVPP GROUP0 | DVPP GROUP1 | ... |   ...   |...|   sp normal memory   |
- * |  svm  |  sp  |  svm  |  sp  |             |   |                      |
+ * |      sp     |      sp     |               |   |                      |
  * |----------------------------------------------------------------------|
  *
  * The host SVM feature reserves 8T virtual memory by mmap, and due to the
@@ -181,6 +185,7 @@ extern void sp_proc_stat_drop(struct sp_proc_stat *stat);
 extern void spa_overview_show(struct seq_file *seq);
 extern void spg_overview_show(struct seq_file *seq);
 extern void proc_sharepool_init(void);
+extern int sp_node_id(struct vm_area_struct *vma);
 
 static inline struct task_struct *sp_get_task(struct mm_struct *mm)
 {
@@ -485,6 +490,10 @@ static inline void sp_free_pages(struct page *page, struct vm_struct *area)
 {
 }
 
+static inline int sp_node_id(struct vm_area_struct *vma)
+{
+	return numa_node_id();
+}
+
 #endif
 
 #endif /* LINUX_SHARE_POOL_H */
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -241,6 +241,7 @@ struct sp_area {
 	struct mm_struct *mm;	/* owner of k2u(task) */
 	unsigned long kva;	/* shared kva */
 	pid_t applier;		/* the original applier process */
+	int node_id;		/* memory node */
 };
 static DEFINE_SPINLOCK(sp_area_lock);
 static struct rb_root sp_area_root = RB_ROOT;
@@ -863,11 +864,13 @@ static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags,
 	unsigned long vend = MMAP_SHARE_POOL_16G_START;
 	unsigned long addr;
 	unsigned long size_align = PMD_ALIGN(size); /* va aligned to 2M */
+	int node_id = (flags >> DEVICE_ID_SHIFT) & DEVICE_ID_MASK;
 
 	if ((flags & SP_DVPP)) {
 		if (sp_area_customized == false) {
-			vstart = MMAP_SHARE_POOL_16G_START;
-			vend = MMAP_SHARE_POOL_16G_START + MMAP_SHARE_POOL_16G_SIZE;
+			vstart = MMAP_SHARE_POOL_16G_START +
+				node_id * MMAP_SHARE_POOL_16G_SIZE;
+			vend = vstart + MMAP_SHARE_POOL_16G_SIZE;
 		} else {
 			if (!spg) {
 				pr_err_ratelimited("share pool: don't allow k2u(task) in host svm multiprocess scene\n");
@@ -878,7 +881,7 @@ static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags,
 		}
 	}
 
-	spa = kmalloc(sizeof(struct sp_area), GFP_KERNEL);
+	spa = __kmalloc_node(sizeof(struct sp_area), GFP_KERNEL, node_id);
 	if (unlikely(!spa)) {
 		pr_err_ratelimited("share pool: alloc spa failed due to lack of memory\n");
 		return ERR_PTR(-ENOMEM);
@@ -973,6 +976,7 @@ static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags,
 	spa->mm = NULL;
 	spa->kva = 0;	/* NULL pointer */
 	spa->applier = applier;
+	spa->node_id = node_id;
 
 	if (spa_inc_usage(type, size, (flags & SP_DVPP))) {
 		err = ERR_PTR(-EINVAL);
@@ -1379,7 +1383,7 @@ void *sp_alloc(unsigned long size, unsigned long sp_flags, int spg_id)
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (sp_flags & ~(SP_HUGEPAGE_ONLY | SP_HUGEPAGE | SP_DVPP)) {
+	if (sp_flags & (~SP_FLAG_MASK)) {
 		pr_err_ratelimited("share pool: allocation failed, invalid flag %lx\n", sp_flags);
 		return ERR_PTR(-EINVAL);
 	}
@@ -2606,7 +2610,8 @@ EXPORT_SYMBOL_GPL(sp_config_dvpp_range);
 static bool is_sp_normal_addr(unsigned long addr)
 {
 	return addr >= MMAP_SHARE_POOL_START &&
-		addr < MMAP_SHARE_POOL_16G_START + MMAP_SHARE_POOL_16G_SIZE;
+		addr < MMAP_SHARE_POOL_16G_START +
+			MAX_DEVID * MMAP_SHARE_POOL_16G_SIZE;
 }
 
 /**
@@ -2634,6 +2639,26 @@
 }
 EXPORT_SYMBOL_GPL(is_sharepool_addr);
 
+int sp_node_id(struct vm_area_struct *vma)
+{
+	struct sp_area *spa;
+	int node_id = numa_node_id();
+
+	if (!enable_ascend_share_pool)
+		return node_id;
+
+	if (vma) {
+		spa = __find_sp_area(vma->vm_start);
+		if (spa) {
+			node_id = spa->node_id;
+			__sp_area_drop(spa);
+		}
+	}
+
+	return node_id;
+}
+EXPORT_SYMBOL_GPL(sp_node_id);
+
 static int __init mdc_default_group(char *s)
 {
 	enable_mdc_default_group = 1;
@@ -2999,6 +3024,16 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
 	unsigned long haddr = address & huge_page_mask(h);
 	bool new_page = false;
 	int err;
+	int node_id;
+	struct sp_area *spa;
+
+	spa = __find_sp_area(vma->vm_start);
+	if (!spa) {
+		pr_err("share pool: vma is invalid, not from sp mmap\n");
+		return ret;
+	}
+	node_id = spa->node_id;
+	__sp_area_drop(spa);
 
 retry:
 	page = find_lock_page(mapping, idx);
@@ -3010,7 +3045,7 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
 		page = alloc_huge_page(vma, haddr, 0);
 		if (IS_ERR(page)) {
 			page = alloc_huge_page_node(hstate_file(vma->vm_file),
-						numa_mem_id());
+						node_id);
 			if (!page)
 				page = ERR_PTR(-ENOMEM);
 		}
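
The exported sp_node_id() helper gives allocation paths outside this file a
way to look up the backing node of a share-pool vma. A minimal sketch of
such a consumer, assuming standard kernel allocator APIs (the function name
here is hypothetical; only sp_node_id() itself is added by this patch):

    #include <linux/gfp.h>
    #include <linux/mm_types.h>
    #include <linux/share_pool.h>

    /* hypothetical consumer: place one page on the vma's share-pool node;
     * sp_node_id() returns spa->node_id for sp-mapped vmas and falls back
     * to numa_node_id() otherwise
     */
    static struct page *sp_alloc_one_page(struct vm_area_struct *vma)
    {
            int nid = sp_node_id(vma);

            return alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0);
    }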