Commit ff0fb9e8 authored by Ma Wupeng, committed by Yang Yingliang

mm: thp: Add memory reliable support for hugepaged collapse

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

--------------------------------

The huge page that khugepaged collapses pages into should use the same kind of
memory region as the pages it replaces. When collapsing pages into a huge page,
khugepaged checks whether the area to be collapsed contains any reliable pages.
If it does, khugepaged allocates the huge page from the mirrored region;
otherwise it allocates it from the non-mirrored region.

Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 094eaabb
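For context, the allocation decision the patch applies in both collapse paths boils down to the following pattern (a minimal sketch; the helper name collapse_gfp_mask() is hypothetical, and page_reliable() plus ___GFP_RELIABILITY are assumed to come from the earlier memory reliable patches in this series):

/*
 * Sketch only: choose the gfp mask for the new huge page based on
 * whether any page in the range being collapsed is a reliable page.
 */
static gfp_t collapse_gfp_mask(bool reliable)
{
        /* Only allocate from the target node. */
        gfp_t gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

        /*
         * Any reliable page in the range means the huge page should be
         * allocated from the mirrored (reliable) region as well.
         */
        if (reliable)
                gfp |= ___GFP_RELIABILITY;

        return gfp;
}

The scan paths set the reliable flag while walking the PTEs (or the radix tree for shmem), and the collapse paths then use it when building the gfp mask, as the diff below shows.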
@@ -950,7 +950,8 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 static void collapse_huge_page(struct mm_struct *mm,
                                unsigned long address,
                                struct page **hpage,
-                               int node, int referenced, int unmapped)
+                               int node, int referenced, int unmapped,
+                               bool reliable)
 {
         pmd_t *pmd, _pmd;
         pte_t *pte;
@@ -969,6 +970,9 @@ static void collapse_huge_page(struct mm_struct *mm,
         /* Only allocate from the target node */
         gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
 
+        if (reliable)
+                gfp |= ___GFP_RELIABILITY;
+
         /*
          * Before allocating the hugepage, release the mmap_sem read lock.
          * The allocation can take potentially a long time if it involves
@@ -1127,6 +1131,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
         spinlock_t *ptl;
         int node = NUMA_NO_NODE, unmapped = 0;
         bool writable = false;
+        bool reliable = false;
 
         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
@@ -1215,6 +1220,9 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                     page_is_young(page) || PageReferenced(page) ||
                     mmu_notifier_test_young(vma->vm_mm, address))
                         referenced++;
+
+                if (page_reliable(page))
+                        reliable = true;
         }
         if (!writable) {
                 result = SCAN_PAGE_RO;
@@ -1230,7 +1238,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                 node = khugepaged_find_target_node();
                 /* collapse_huge_page will return with the mmap_sem released */
                 collapse_huge_page(mm, address, hpage, node,
-                                referenced, unmapped);
+                                referenced, unmapped, reliable);
         }
 out:
         trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
@@ -1324,7 +1332,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
  */
 static void collapse_shmem(struct mm_struct *mm,
                 struct address_space *mapping, pgoff_t start,
-                struct page **hpage, int node)
+                struct page **hpage, int node, bool reliable)
 {
         gfp_t gfp;
         struct page *page, *new_page, *tmp;
@@ -1340,6 +1348,9 @@ static void collapse_shmem(struct mm_struct *mm,
         /* Only allocate from the target node */
         gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
 
+        if (reliable)
+                gfp |= ___GFP_RELIABILITY;
+
         new_page = khugepaged_alloc_page(hpage, gfp, node);
         if (!new_page) {
                 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
@@ -1613,6 +1624,7 @@ static void khugepaged_scan_shmem(struct mm_struct *mm,
         int present, swap;
         int node = NUMA_NO_NODE;
         int result = SCAN_SUCCEED;
+        bool reliable = false;
 
         present = 0;
         swap = 0;
@@ -1670,6 +1682,9 @@ static void khugepaged_scan_shmem(struct mm_struct *mm,
                         slot = radix_tree_iter_resume(slot, &iter);
                         cond_resched_rcu();
                 }
+
+                if (page_reliable(page))
+                        reliable = true;
         }
         rcu_read_unlock();
 
@@ -1678,7 +1693,8 @@ static void khugepaged_scan_shmem(struct mm_struct *mm,
                 result = SCAN_EXCEED_NONE_PTE;
         } else {
                 node = khugepaged_find_target_node();
-                collapse_shmem(mm, mapping, start, hpage, node);
+                collapse_shmem(mm, mapping, start, hpage, node,
+                                reliable);
         }
 }