Commit 426fecae, authored by Weilong Chen, committed by Yang Yingliang

ascend: share_pool: Use sharepool_no_page to alloc hugepage

ascend inclusion
category: feature
bugzilla: NA
CVE: NA

-------------------------------------------------

Allocate share pool huge pages through a dedicated interface,
sharepool_no_page(), which improves the efficiency of memory
allocation for share-pool mappings.
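
In outline, huge-page faults on share-pool VMAs now dispatch to the
dedicated handler (a simplified sketch of the hugetlb_fault() hunk
below; locking and error handling omitted):

	if (huge_pte_none(entry)) {
		if (sp_check_vm_share_pool(vma->vm_flags))
			ret = sharepool_no_page(mm, vma, mapping, idx,
						address, ptep, flags);
		else
			ret = hugetlb_no_page(mm, vma, mapping, idx,
					      address, ptep, flags);
	}

sharepool_no_page() first looks the page up in the shared mapping's
page cache and, on a miss, allocates a huge page with a node-local
fallback, so the fallback no longer needs to be special-cased inside
hugetlb_no_page().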

Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Signed-off-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent: d079bf87
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -645,4 +645,8 @@ static inline spinlock_t *huge_pte_lock(struct hstate *h,
 	return ptl;
 }
 
+#ifdef CONFIG_ASCEND_SHARE_POOL
+pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, int writable);
+#endif
+
 #endif /* _LINUX_HUGETLB_H */
--- a/include/linux/share_pool.h
+++ b/include/linux/share_pool.h
@@ -220,6 +220,11 @@ static inline void sp_dump_stack(void)
 	dump_stack();
 }
 
+vm_fault_t sharepool_no_page(struct mm_struct *mm,
+			     struct vm_area_struct *vma,
+			     struct address_space *mapping, pgoff_t idx,
+			     unsigned long address, pte_t *ptep, unsigned int flags);
+
 #else
 
 static inline int sp_group_add_task(int pid, int spg_id)
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3352,8 +3352,13 @@ const struct vm_operations_struct hugetlb_vm_ops = {
 	.pagesize = hugetlb_vm_op_pagesize,
 };
 
+#ifdef CONFIG_ASCEND_SHARE_POOL
+pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
+		    int writable)
+#else
 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
 				int writable)
+#endif
 {
 	pte_t entry;
@@ -3370,6 +3375,9 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
 
 	return entry;
 }
+#ifdef CONFIG_ASCEND_SHARE_POOL
+EXPORT_SYMBOL(make_huge_pte);
+#endif
 
 static void set_huge_ptep_writable(struct vm_area_struct *vma,
 				unsigned long address, pte_t *ptep)
@@ -3962,12 +3970,6 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 	}
 
 	page = alloc_huge_page(vma, haddr, 0);
-	if (IS_ERR(page) && sp_check_vm_share_pool(vma->vm_flags)) {
-		page = alloc_huge_page_node(hstate_file(vma->vm_file),
-					    numa_mem_id());
-		if (!page)
-			page = ERR_PTR(-ENOMEM);
-	}
 	if (IS_ERR(page)) {
 		/*
 		 * Returning error will result in faulting task being
@@ -4155,7 +4157,15 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	entry = huge_ptep_get(ptep);
 	if (huge_pte_none(entry)) {
+#ifdef CONFIG_ASCEND_SHARE_POOL
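+		/*
+		 * Share-pool mappings are served by the dedicated fault
+		 * handler; all other VMAs keep the stock hugetlb path.
+		 */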
+		if (sp_check_vm_share_pool(vma->vm_flags)) {
+			ret = sharepool_no_page(mm, vma, mapping, idx, address, ptep, flags);
+		} else {
+			ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
+		}
+#else
 		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
+#endif
 		goto out_mutex;
 	}
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -41,6 +41,8 @@
 #include <linux/idr.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/rmap.h>
+#include <linux/hugetlb.h>
 
 /* access control mode macros */
 #define AC_NONE			0
@@ -2399,6 +2401,94 @@ static int spa_stat_show(struct seq_file *seq, void *offset)
 	return 0;
 }
 
+vm_fault_t sharepool_no_page(struct mm_struct *mm,
+			     struct vm_area_struct *vma,
+			     struct address_space *mapping, pgoff_t idx,
+			     unsigned long address, pte_t *ptep, unsigned int flags)
+{
+	struct hstate *h = hstate_vma(vma);
+	vm_fault_t ret = VM_FAULT_SIGBUS;
+	unsigned long size;
+	struct page *page;
+	pte_t new_pte;
+	spinlock_t *ptl;
+	unsigned long haddr = address & huge_page_mask(h);
+	bool new_page = false;
+	int err;
+
+retry:
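+	/*
+	 * Share-pool pages are shared by every task in the group, so
+	 * look the page up in the shared mapping's page cache first.
+	 */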
+	page = find_lock_page(mapping, idx);
+	if (!page) {
+		size = i_size_read(mapping->host) >> huge_page_shift(h);
+		if (idx >= size)
+			goto out;
+
+		page = alloc_huge_page(vma, haddr, 0);
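+		/*
+		 * If the reserve-backed allocation fails, fall back to
+		 * allocating directly on the current memory node.
+		 */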
+		if (IS_ERR(page)) {
+			page = alloc_huge_page_node(hstate_file(vma->vm_file),
+						    numa_mem_id());
+			if (!page)
+				page = ERR_PTR(-ENOMEM);
+		}
+		if (IS_ERR(page)) {
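+			/*
+			 * If a concurrent fault already populated the
+			 * PTE, the allocation failure is harmless.
+			 */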
+			ptl = huge_pte_lock(h, mm, ptep);
+			if (!huge_pte_none(huge_ptep_get(ptep))) {
+				ret = 0;
+				spin_unlock(ptl);
+				goto out;
+			}
+			spin_unlock(ptl);
+			ret = vmf_error(PTR_ERR(page));
+			goto out;
+		}
+		__SetPageUptodate(page);
+		new_page = true;
+
+		/* sharepool pages are all shared */
+		err = huge_add_to_page_cache(page, mapping, idx);
+		if (err) {
+			put_page(page);
+			if (err == -EEXIST)
+				goto retry;
+			goto out;
+		}
+	}
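+
+	/*
+	 * Recheck the file size and the PTE under the page-table lock
+	 * before installing the new mapping.
+	 */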
+	ptl = huge_pte_lock(h, mm, ptep);
+	size = i_size_read(mapping->host) >> huge_page_shift(h);
+	if (idx >= size)
+		goto backout;
+
+	ret = 0;
+	if (!huge_pte_none(huge_ptep_get(ptep)))
+		goto backout;
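+
+	/*
+	 * The page stays in the page cache; map it into this mm with a
+	 * shared huge PTE, writable when the VMA permits it.
+	 */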
+	page_dup_rmap(page, true);
+	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
+				&& (vma->vm_flags & VM_SHARED)));
+	set_huge_pte_at(mm, haddr, ptep, new_pte);
+
+	hugetlb_count_add(pages_per_huge_page(h), mm);
+	spin_unlock(ptl);
+
+	if (new_page) {
+		SetPagePrivate(&page[1]);
+	}
+
+	unlock_page(page);
+out:
+	return ret;
+
+backout:
+	spin_unlock(ptl);
+	unlock_page(page);
+	put_page(page);
+	goto out;
+}
+EXPORT_SYMBOL(sharepool_no_page);
+
 /*
  * Called by proc_root_init() to initialize the /proc/sharepool subtree
  */