Commit a8576876 authored by Jingxian He, committed by Zheng Zengkai

mm/pin_mem: improve pin mem pages rmap and free method

euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4QPBH
CVE: NA

------------

1. Improve pin mem pages rmap:
Add the Hotreplace flag for pin mem pages to avoid splitting.
When the Hotreplace flag is set, the page will not be
added to the deferred_split page list during rmap.
If pin mem pages were added to the deferred_split page list,
the deferred_split_scan ops would split pages that have been pinned.
Once a pin mem page is split, we cannot remap it to the recovered
process with the recorded pin mem mapping rule.
Moreover, the deferred_split page list node can be corrupted if the
deferred_split_scan function and the pin pages remapping execute at
the same time.
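
The Set/Clear/PageHotreplace helpers used in this patch imply a new
page flag. A minimal sketch of how such a flag is typically declared
in include/linux/page-flags.h (the bit name and policy below are
illustrative assumptions, not taken from this patch):

	enum pageflags {
		/* ... existing flags ... */
		PG_hotreplace,	/* pinned for hot replace; skip deferred split */
		__NR_PAGEFLAGS,
	};

	/* Generates the PageHotreplace(), SetPageHotreplace() and
	 * ClearPageHotreplace() helpers used below.
	 */
	PAGEFLAG(Hotreplace, hotreplace, PF_ANY)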

2. Improve the free method for pin mem pages:
Use put_page instead of calling free_pages directly.
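
The collect paths (collect_pmd_huge_pages/collect_normal_pages) take
an extra reference on each pinned page via atomic_inc of
page->_refcount, so the release paths must drop that reference rather
than return the page to the allocator unconditionally. A minimal
sketch of the difference (illustrative, not part of the patch):

	/* Before: frees the page immediately at the given order, even
	 * if another user still holds a reference to it.
	 */
	__free_pages(page, order);

	/* After: drops only this pin reference; the page is freed when
	 * the last reference goes away, and a compound page is torn
	 * down through its destructor at the correct order.
	 */
	put_page(page);
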
Signed-off-by: Jingxian He <hejingxian@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 2cef14da
@@ -531,6 +531,7 @@ int collect_pmd_huge_pages(struct task_struct *task,
 		if (IS_PTE_PRESENT(pte_entry[0])) {
 			temp_page = pfn_to_page(pte_entry[0] & PM_PFRAME_MASK);
 			if (PageHead(temp_page)) {
+				SetPageHotreplace(temp_page);
 				atomic_inc(&((temp_page)->_refcount));
 				start += HPAGE_PMD_SIZE;
 				pme->phy_addr_array[index] = page_to_phys(temp_page);
@@ -611,6 +612,7 @@ int collect_normal_pages(struct task_struct *task,
 				continue;
 			}
 			tmp_page = pfn_to_page(pte_entry[i] & PM_PFRAME_MASK);
+			SetPageHotreplace(tmp_page);
 			atomic_inc(&(tmp_page->_refcount));
 			phy_addr_array[i] = ((pte_entry[i] & PM_PFRAME_MASK) << PAGE_SHIFT);
 		}
@@ -839,14 +841,16 @@ vm_fault_t remap_normal_pages(struct mm_struct *mm, struct vm_area_struct *vma,
 		ret = do_anon_page_remap(vma, address, pmd, page);
 		if (ret)
 			goto free;
+		ClearPageHotreplace(page);
 	}
 	return 0;
 
 free:
+	ClearPageHotreplace(page);
 	for (i = j; i < pme->nr_pages; i++) {
 		phy_addr = pme->phy_addr_array[i];
 		if (phy_addr) {
-			__free_page(phys_to_page(phy_addr));
+			put_page(phys_to_page(phy_addr));
 			pme->phy_addr_array[i] = 0;
 		}
 	}
@@ -927,16 +931,18 @@ vm_fault_t remap_huge_pmd_pages(struct mm_struct *mm, struct vm_area_struct *vma
 		ret = do_anon_huge_page_remap(vma, address, pmd, page);
 		if (ret)
 			goto free;
+		ClearPageHotreplace(page);
 	}
 	return 0;
 
 free:
+	ClearPageHotreplace(page);
 	for (i = j; i < pme->nr_pages; i++) {
 		phy_addr = pme->phy_addr_array[i];
 		if (phy_addr) {
 			page = phys_to_page(phy_addr);
 			if (!(page->flags & PAGE_FLAGS_CHECK_RESERVED)) {
-				__free_pages(page, HPAGE_PMD_ORDER);
+				put_page(page);
 				pme->phy_addr_array[i] = 0;
 			}
 		}
@@ -950,7 +956,6 @@ static void free_unmap_pages(struct page_map_info *pmi,
 {
 	unsigned int i, j;
 	unsigned long phy_addr;
-	unsigned int order;
 	struct page *page;
 
 	pme = (struct page_map_entry *)(next_pme(pme));
@@ -959,9 +964,8 @@ static void free_unmap_pages(struct page_map_info *pmi,
 			phy_addr = pme->phy_addr_array[i];
 			if (phy_addr) {
 				page = phys_to_page(phy_addr);
-				order = pme->is_huge_page ? HPAGE_PMD_ORDER : 0;
 				if (!(page->flags & PAGE_FLAGS_CHECK_RESERVED)) {
-					__free_pages(page, order);
+					put_page(page);
 					pme->phy_addr_array[i] = 0;
 				}
 			}
@@ -1026,7 +1030,7 @@ EXPORT_SYMBOL_GPL(do_mem_remap);
 static void free_all_reserved_pages(void)
 {
-	unsigned int i, j, index, order;
+	unsigned int i, j, index;
 	struct page_map_info *pmi;
 	struct page_map_entry *pme;
 	struct page *page;
@@ -1042,12 +1046,12 @@ static void free_all_reserved_pages(void)
 		pme = pmi->pme;
 		for (i = 0; i < pmi->entry_num; i++) {
 			for (j = 0; j < pme->nr_pages; j++) {
-				order = pme->is_huge_page ? HPAGE_PMD_ORDER : 0;
 				phy_addr = pme->phy_addr_array[j];
 				if (phy_addr) {
 					page = phys_to_page(phy_addr);
+					ClearPageHotreplace(page);
 					if (!(page->flags & PAGE_FLAGS_CHECK_RESERVED)) {
-						__free_pages(page, order);
+						put_page(page);
 						pme->phy_addr_array[j] = 0;
 					}
 				}
......
@@ -1313,8 +1313,10 @@ static void page_remove_anon_compound_rmap(struct page *page)
 		 * page of the compound page is unmapped, but at least one
 		 * small page is still mapped.
 		 */
-		if (nr && nr < thp_nr_pages(page))
-			deferred_split_huge_page(page);
+		if (nr && nr < thp_nr_pages(page)) {
+			if (!PageHotreplace(page))
+				deferred_split_huge_page(page);
+		}
 	} else {
 		nr = thp_nr_pages(page);
 	}
@@ -1361,8 +1363,10 @@ void page_remove_rmap(struct page *page, bool compound)
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
 
-	if (PageTransCompound(page))
-		deferred_split_huge_page(compound_head(page));
+	if (PageTransCompound(page)) {
+		if (!PageHotreplace(compound_head(page)))
+			deferred_split_huge_page(compound_head(page));
+	}
 
 	/*
 	 * It would be tidy to reset the PageAnon mapping here,
......