提交 d4bec1a8 编写于 作者: Z Zhou Guanghui 提交者: Yang Yingliang

Ascend/hugetlb:support alloc normal and buddy hugepage

ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4D63I
CVE: NA

----------------------------------------------------

The function hugetlb_alloc_hugepage currently allocates from the static
hugepage pool first, and falls back to allocating hugepages from the
buddy system once the static pool is exhausted. Two additional modes
are now supported: static hugepages only, and buddy hugepages only.
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Signed-off-by: Guo Mengqi <guomengqi3@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 8a04737c
...@@ -374,8 +374,15 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping, ...@@ -374,8 +374,15 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
pgoff_t idx); pgoff_t idx);
#ifdef CONFIG_ASCEND_FEATURES #ifdef CONFIG_ASCEND_FEATURES
#define HUGETLB_ALLOC_NONE 0x00
#define HUGETLB_ALLOC_NORMAL 0x01 /* normal hugepage */
#define HUGETLB_ALLOC_BUDDY 0x02 /* buddy hugepage */
#define HUGETLB_ALLOC_MASK (HUGETLB_ALLOC_NONE | \
HUGETLB_ALLOC_NORMAL | \
HUGETLB_ALLOC_BUDDY)
const struct hstate *hugetlb_get_hstate(void); const struct hstate *hugetlb_get_hstate(void);
struct page *hugetlb_alloc_hugepage(int nid); struct page *hugetlb_alloc_hugepage(int nid, int flag);
int hugetlb_insert_hugepage_pte(struct mm_struct *mm, unsigned long addr, int hugetlb_insert_hugepage_pte(struct mm_struct *mm, unsigned long addr,
pgprot_t prot, struct page *hpage); pgprot_t prot, struct page *hpage);
#else #else
...@@ -384,7 +391,7 @@ static inline const struct hstate *hugetlb_get_hstate(void) ...@@ -384,7 +391,7 @@ static inline const struct hstate *hugetlb_get_hstate(void)
return NULL; return NULL;
} }
static inline struct page *hugetlb_alloc_hugepage(int nid) static inline struct page *hugetlb_alloc_hugepage(int nid, int flag)
{ {
return NULL; return NULL;
} }
......
...@@ -5234,17 +5234,48 @@ const struct hstate *hugetlb_get_hstate(void) ...@@ -5234,17 +5234,48 @@ const struct hstate *hugetlb_get_hstate(void)
} }
EXPORT_SYMBOL_GPL(hugetlb_get_hstate); EXPORT_SYMBOL_GPL(hugetlb_get_hstate);
/*
 * Grab a free hugepage from the static (pre-reserved) pool on node @nid.
 *
 * Only pages above the reserve watermark are handed out, so existing
 * reservations are never consumed.  Returns the dequeued page, or NULL
 * when the node's free pool (minus reserves) is empty.
 *
 * hugetlb_lock serializes access to the per-hstate free/reserve counters
 * and the free lists that dequeue_huge_page_nodemask() manipulates.
 */
static struct page *hugetlb_alloc_hugepage_normal(struct hstate *h,
		gfp_t gfp_mask, int nid)
{
	struct page *page = NULL;

	spin_lock(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0)
		page = dequeue_huge_page_nodemask(h, gfp_mask, nid,
						  NULL, NULL);
	spin_unlock(&hugetlb_lock);

	return page;
}
/* /*
* Allocate hugepage without reserve * Allocate hugepage without reserve
*/ */
struct page *hugetlb_alloc_hugepage(int nid) struct page *hugetlb_alloc_hugepage(int nid, int flag)
{ {
struct hstate *h = &default_hstate;
gfp_t gfp_mask = htlb_alloc_mask(h);
struct page *page = NULL;
VM_WARN_ON(nid < 0 || nid >= MAX_NUMNODES); VM_WARN_ON(nid < 0 || nid >= MAX_NUMNODES);
if (flag & ~HUGETLB_ALLOC_MASK)
return NULL;
if (nid == NUMA_NO_NODE) if (nid == NUMA_NO_NODE)
nid = numa_mem_id(); nid = numa_mem_id();
return alloc_huge_page_node(&default_hstate, nid); gfp_mask |= __GFP_THISNODE;
if (flag & HUGETLB_ALLOC_NORMAL)
page = hugetlb_alloc_hugepage_normal(h, gfp_mask, nid);
else if (flag & HUGETLB_ALLOC_BUDDY) {
if (enable_charge_mighp)
gfp_mask |= __GFP_ACCOUNT;
page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
} else
page = alloc_huge_page_node(h, nid);
return page;
} }
EXPORT_SYMBOL_GPL(hugetlb_alloc_hugepage); EXPORT_SYMBOL_GPL(hugetlb_alloc_hugepage);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册