Commit 3a5b6495 authored by Zefan Li, committed by Xie XiuQi

mm/hugetlb: allocate huge page and setup page table for Davinci online mode

ascend inclusion
category: feature
bugzilla: NA
CVE: NA

-------------------

Allocate huge pages and set up page tables for Davinci online mode.
Remove the vma parameter so the hisi driver doesn't have to construct a fake vma.
Add a new interface to retrieve hugepage stats.
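
For illustration, a minimal usage sketch of the new interfaces follows. The helper
name davinci_map_hugepage, its parameters, and the locking/alignment comments are
assumptions for this sketch, not part of the patch; only hugetlb_get_hstate(),
hugetlb_alloc_hugepage() and hugetlb_insert_hugepage_pte() come from this change.

    #include <linux/mm.h>
    #include <linux/hugetlb.h>

    /*
     * Hypothetical caller (not part of this patch): map one huge page from
     * node @nid into @mm at @addr. addr is assumed to be aligned to the
     * default hugepage size, which can be queried via hugetlb_get_hstate();
     * the caller is assumed to hold the target mm's mmap_sem.
     */
    static int davinci_map_hugepage(struct mm_struct *mm, unsigned long addr,
                                    pgprot_t prot, int nid)
    {
            struct page *hpage;
            int ret;

            hpage = hugetlb_alloc_hugepage(nid);
            if (!hpage)
                    return -ENOMEM;

            ret = hugetlb_insert_hugepage_pte(mm, addr, prot, hpage);
            if (ret)
                    put_page(hpage);        /* return the page to the hugetlb pool */

            return ret;
    }
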
Signed-off-by: Zefan Li <lizefan@huawei.com>
Signed-off-by: LI Heng <liheng40@huawei.com>
Signed-off-by: Lijun Fang <fanglijun3@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 53324233
...
@@ -372,6 +372,13 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);
#ifdef CONFIG_ARCH_ASCEND
const struct hstate *hugetlb_get_hstate(void);
struct page *hugetlb_alloc_hugepage(int nid);
int hugetlb_insert_hugepage_pte(struct mm_struct *mm, unsigned long addr,
				pgprot_t prot, struct page *hpage);
#endif
/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);
...
@@ -5122,3 +5122,78 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
		spin_unlock(&hugetlb_lock);
	}
}
#ifdef CONFIG_ARCH_ASCEND

const struct hstate *hugetlb_get_hstate(void)
{
	return &default_hstate;
}
EXPORT_SYMBOL_GPL(hugetlb_get_hstate);

/*
 * Allocate a hugepage without using reserves.
 */
struct page *hugetlb_alloc_hugepage(int nid)
{
	return alloc_huge_page_node(&default_hstate, nid);
}
EXPORT_SYMBOL_GPL(hugetlb_alloc_hugepage);
/*
 * Walk (and, where needed, allocate) the page table down to the PMD level;
 * the returned slot is used as a huge PTE for a PMD-sized mapping.
 */
static pte_t *hugetlb_huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
				     unsigned long size)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		pud = pud_alloc(mm, pgd, addr);
	else
		pud = pud_offset(pgd, addr);

	if (pud) {
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (pmd) {
				/* Drop any existing entry at this slot. */
				if (!pmd_none(*pmd))
					pmd_clear(pmd);
				pte = (pte_t *)pmd;
			} else {
				pte = (pte_t *)pmd_alloc(mm, pud, addr);
			}
		} else {
			pte = (pte_t *)pmd_alloc(mm, pud, addr);
		}
	}

	return pte;
}
int hugetlb_insert_hugepage_pte(struct mm_struct *mm, unsigned long addr,
				pgprot_t prot, struct page *hpage)
{
	pte_t *ptep, entry;
	struct hstate *h = &default_hstate;

	ptep = hugetlb_huge_pte_alloc(mm, addr, huge_page_size(h));
	if (!ptep)
		return -ENXIO;

	WARN_ON(ptep && !pte_none(*ptep) && !pte_huge(*ptep));

	/* Build a young, dirty, writable huge PTE and install it. */
	entry = mk_huge_pte(hpage, prot);
	entry = huge_pte_mkdirty(entry);
	entry = huge_pte_mkwrite(entry);
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	set_huge_pte_at(mm, addr, ptep, entry);

	return 0;
}
EXPORT_SYMBOL_GPL(hugetlb_insert_hugepage_pte);

#endif