提交 08b7da3e 编写于 作者: K Kemeng Shi 提交者: Zheng Zengkai

hugepage: add sysctl for hugepage alloc and mig

euleros inclusion
category: feature
feature: etmem
bugzilla: 48246

-------------------------------------------------

Add the /proc/sys/kernel/hugepage_pmem_allocall switch. Set it to 1 to
allow all memory in pmem to be allocated for hugepages. Set it to 0
(default) so hugepage allocation is limited by the zone watermark as usual.
Add the /proc/sys/kernel/hugepage_mig_noalloc switch. Set it to 1 to
forbid allocating new hugepages during hugepage migration when hugepages
on the destination node run out. Set it to 0 (default) to allow hugepage
allocation during hugepage migration as usual.
Signed-off-by: Kemeng Shi <shikemeng@huawei.com>
Reviewed-by: louhongxiang <louhongxiang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
上级 533cc721
......@@ -159,6 +159,9 @@ struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;
extern int sysctl_hugetlb_mig_noalloc;
extern int sysctl_hugetlb_pmem_allocall;
/* arch callbacks */
pte_t *huge_pte_alloc(struct mm_struct *mm,
......
......@@ -3129,6 +3129,24 @@ static struct ctl_table vm_table[] = {
.extra2 = SYSCTL_ONE,
},
#endif
{
.procname = "hugepage_mig_noalloc",
.data = &sysctl_hugetlb_mig_noalloc,
.maxlen = sizeof(sysctl_hugetlb_mig_noalloc),
.mode = 0600,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "hugepage_pmem_allocall",
.data = &sysctl_hugetlb_pmem_allocall,
.maxlen = sizeof(sysctl_hugetlb_pmem_allocall),
.mode = 0600,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{ }
};
......
......@@ -79,6 +79,9 @@ DEFINE_SPINLOCK(hugetlb_lock);
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
int sysctl_hugetlb_mig_noalloc;
int sysctl_hugetlb_pmem_allocall;
static inline bool PageHugeFreed(struct page *head)
{
return page_private(head + 4) == -1UL;
......@@ -1736,6 +1739,8 @@ static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
if (is_node_pmem(node) && sysctl_hugetlb_pmem_allocall)
gfp_mask |= __GFP_MEMALLOC;
page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
node_alloc_noretry);
if (page)
......@@ -1980,7 +1985,7 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
struct page *page;
page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
if (page) {
if (page || sysctl_hugetlb_mig_noalloc) {
spin_unlock(&hugetlb_lock);
return page;
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册