Commit 396ff7c6 authored by Zhou Guanghui, committed by Yang Yingliang

Ascend/hugetlb: support allocating normal and temporary hugepages

ascend inclusion
category: bugfix
bugzilla: NA
CVE: NA

----------------------------------------------------

The current hugetlb_alloc_hugepage() allocates from the normal hugepage
pool first and falls back to temporary hugepages once the normal pool
is exhausted. Add two additional modes: allocate from normal hugepages
only, or from temporary hugepages only.

Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent da660018
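
For context, a minimal caller sketch exercising the three modes introduced by the new flag argument. This is hypothetical and not part of the patch; all identifiers except example_alloc_hugepage() come from the diff below.

/*
 * Hypothetical caller (not part of this patch): try each allocation
 * mode in turn. The flags and hugetlb_alloc_hugepage() signature are
 * those added by this patch.
 */
static struct page *example_alloc_hugepage(int nid)
{
	struct page *page;

	/* Normal hugepages only: dequeue from the pre-allocated pool. */
	page = hugetlb_alloc_hugepage(nid, HUGETLB_ALLOC_NORMAL);
	if (page)
		return page;

	/* Temporary hugepages only: allocated on demand, migratable. */
	page = hugetlb_alloc_hugepage(nid, HUGETLB_ALLOC_TEMP);
	if (page)
		return page;

	/* Default: normal pool first, then temporary (pre-patch behaviour). */
	return hugetlb_alloc_hugepage(nid, HUGETLB_ALLOC_NONE);
}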
include/linux/hugetlb.h
@@ -376,8 +376,15 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 			pgoff_t idx);
 
 #ifdef CONFIG_ASCEND_FEATURES
+#define HUGETLB_ALLOC_NONE		0x00
+#define HUGETLB_ALLOC_NORMAL		0x01	/* normal hugepage */
+#define HUGETLB_ALLOC_TEMP		0x02	/* temporary hugepage */
+#define HUGETLB_ALLOC_MASK		(HUGETLB_ALLOC_NONE | \
+					 HUGETLB_ALLOC_NORMAL | \
+					 HUGETLB_ALLOC_TEMP)
+
 const struct hstate *hugetlb_get_hstate(void);
-struct page *hugetlb_alloc_hugepage(int nid);
+struct page *hugetlb_alloc_hugepage(int nid, int flag);
 int hugetlb_insert_hugepage_pte(struct mm_struct *mm, unsigned long addr,
 			pgprot_t prot, struct page *hpage);
 int hugetlb_insert_hugepage_pte_by_pa(struct mm_struct *mm,
@@ -389,7 +396,7 @@ static inline const struct hstate *hugetlb_get_hstate(void)
 	return NULL;
 }
 
-static inline struct page *hugetlb_alloc_hugepage(int nid)
+static inline struct page *hugetlb_alloc_hugepage(int nid, int flag)
 {
 	return NULL;
 }
...
mm/hugetlb.c
@@ -5241,17 +5241,48 @@ const struct hstate *hugetlb_get_hstate(void)
 }
 EXPORT_SYMBOL_GPL(hugetlb_get_hstate);
 
+static struct page *hugetlb_alloc_hugepage_normal(struct hstate *h,
+		gfp_t gfp_mask, int nid)
+{
+	struct page *page = NULL;
+
+	spin_lock(&hugetlb_lock);
+	if (h->free_huge_pages - h->resv_huge_pages > 0)
+		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL, NULL);
+	spin_unlock(&hugetlb_lock);
+
+	return page;
+}
+
 /*
  * Allocate hugepage without reserve
  */
-struct page *hugetlb_alloc_hugepage(int nid)
+struct page *hugetlb_alloc_hugepage(int nid, int flag)
 {
+	struct hstate *h = &default_hstate;
+	gfp_t gfp_mask = htlb_alloc_mask(h);
+	struct page *page = NULL;
+
 	VM_WARN_ON(nid < 0 || nid >= MAX_NUMNODES);
 
+	if (flag & ~HUGETLB_ALLOC_MASK)
+		return NULL;
+
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
 
-	return alloc_huge_page_node(&default_hstate, nid);
+	gfp_mask |= __GFP_THISNODE;
+	if (flag & HUGETLB_ALLOC_NORMAL)
+		page = hugetlb_alloc_hugepage_normal(h, gfp_mask, nid);
+	else if (flag & HUGETLB_ALLOC_TEMP) {
+		if (enable_charge_mighp)
+			gfp_mask |= __GFP_ACCOUNT;
+		page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
+	} else
+		page = alloc_huge_page_node(h, nid);
+
+	return page;
 }
 EXPORT_SYMBOL_GPL(hugetlb_alloc_hugepage);
...
mm/share_pool.c
@@ -3133,7 +3133,7 @@ struct page *sp_alloc_pages(struct vm_struct *area, gfp_t mask,
 		unsigned int page_order, int node)
 {
 	if (area->flags & VM_HUGE_PAGES)
-		return hugetlb_alloc_hugepage(NUMA_NO_NODE);
+		return hugetlb_alloc_hugepage(NUMA_NO_NODE, HUGETLB_ALLOC_NONE);
 	else
 		return alloc_pages_node(node, mask, page_order);
 }
...
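
As the share_pool hunk shows, existing callers keep the legacy first-normal-then-temporary behaviour by passing HUGETLB_ALLOC_NONE. A minimal sketch of the flag-validation contract follows; the function name and the stray 0x80 bit are hypothetical, while WARN_ON(), put_page(), and NUMA_NO_NODE are standard kernel symbols.

/*
 * Hypothetical sanity check (not part of this patch): any bit outside
 * HUGETLB_ALLOC_MASK is rejected, and a page obtained with
 * HUGETLB_ALLOC_NONE is released with put_page() as before.
 */
static void example_flag_check(void)
{
	struct page *page;

	/* 0x80 is outside HUGETLB_ALLOC_MASK, so this must return NULL. */
	WARN_ON(hugetlb_alloc_hugepage(NUMA_NO_NODE, 0x80));

	page = hugetlb_alloc_hugepage(NUMA_NO_NODE, HUGETLB_ALLOC_NONE);
	if (page)
		put_page(page);
}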