Unverified commit 543ee9f8, authored by openeuler-ci-bot, committed by Gitee

!1246 mm/hugetlb_vmemmap: remap head page to newly allocated page

Merge Pull Request from: @ci-robot 
 
PR sync from: Liu Shixin <liushixin2@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/5RIDEB6A7NBTUWJUHQL6MOC4ZUD2N323/ 
 
 
Link: https://gitee.com/openeuler/kernel/pulls/1246

Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com> 
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com> 
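
Summary of the change for reviewers: vmemmap_remap_free() now tries to allocate a fresh, node-local page to act as the reused head vmemmap page, copies the current head page contents into it, and only falls back to reusing the currently mapped head page if that allocation fails. This keeps the original block of struct page backing memory contiguous when it is later freed back to the page allocator. The sketch below condenses that flow into a single helper for readability; alloc_head_vmemmap_page() is a hypothetical name and is not part of the patch, which open-codes the same logic inside vmemmap_remap_free() (see the last hunk of the diff).

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mm.h>

/*
 * Condensed sketch only: the helper name is hypothetical; the actual
 * patch open-codes this in vmemmap_remap_free().
 */
static struct page *alloc_head_vmemmap_page(unsigned long start,
					    unsigned long reuse,
					    struct list_head *vmemmap_pages)
{
	/* Prefer a page on the same node as the vmemmap being remapped. */
	int nid = page_to_nid((struct page *)start);
	/* Best-effort allocation: don't retry hard, don't warn on failure. */
	gfp_t gfp_mask = GFP_KERNEL | __GFP_THISNODE | __GFP_NORETRY |
			 __GFP_NOWARN;
	struct page *page = alloc_pages_node(nid, gfp_mask, 0);

	if (page) {
		/* Carry over the live contents of the head vmemmap page. */
		copy_page(page_to_virt(page), (void *)reuse);
		/* vmemmap_remap_pte() later list_del()s it when remapping. */
		list_add(&page->lru, vmemmap_pages);
	}

	/*
	 * NULL means "no replacement page": the walk then reuses the
	 * currently mapped head page, i.e. the pre-patch behaviour.
	 */
	return page;
}

Because the head page is now remapped to the replacement page with a writable PAGE_KERNEL mapping rather than left untouched, the TLB flush in vmemmap_remap_range() has to cover the whole [start, end) range, which is why the first hunk below drops the start + PAGE_SIZE optimisation.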
@@ -221,12 +221,7 @@ static int vmemmap_remap_range(unsigned long start, unsigned long end,
 			return ret;
 	} while (pgd++, addr = next, addr != end);
 
-	/*
-	 * We only change the mapping of the vmemmap virtual address range
-	 * [@start + PAGE_SIZE, end), so we only need to flush the TLB which
-	 * belongs to the range.
-	 */
-	flush_tlb_kernel_range(start + PAGE_SIZE, end);
+	flush_tlb_kernel_range(start, end);
 
 	return 0;
 }
@@ -264,9 +259,23 @@ static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
 	 * to the tail pages.
 	 */
 	pgprot_t pgprot = PAGE_KERNEL_RO;
-	pte_t entry = mk_pte(walk->reuse_page, pgprot);
 	struct page *page = pte_page(*pte);
+	pte_t entry;
+
+	/* Remapping the head page requires r/w */
+	if (unlikely(addr == walk->reuse_addr)) {
+		pgprot = PAGE_KERNEL;
+		list_del(&walk->reuse_page->lru);
+
+		/*
+		 * Makes sure that preceding stores to the page contents from
+		 * vmemmap_remap_free() become visible before the set_pte_at()
+		 * write.
+		 */
+		smp_wmb();
+	}
 
+	entry = mk_pte(walk->reuse_page, pgprot);
 	list_add_tail(&page->lru, walk->vmemmap_pages);
 	set_pte_at(&init_mm, addr, pte, entry);
 }
@@ -331,6 +340,24 @@ int vmemmap_remap_free(unsigned long start, unsigned long end,
 		.reuse_addr	= reuse,
 		.vmemmap_pages	= &vmemmap_pages,
 	};
+	int nid = page_to_nid((struct page *)start);
+	gfp_t gfp_mask = GFP_KERNEL | __GFP_THISNODE | __GFP_NORETRY |
+			__GFP_NOWARN;
+
+	/*
+	 * Allocate a new head vmemmap page to avoid breaking a contiguous
+	 * block of struct page memory when freeing it back to page allocator
+	 * in free_vmemmap_page_list(). This will allow the likely contiguous
+	 * struct page backing memory to be kept contiguous and allowing for
+	 * more allocations of hugepages. Fallback to the currently
+	 * mapped head page in case should it fail to allocate.
+	 */
+	walk.reuse_page = alloc_pages_node(nid, gfp_mask, 0);
+	if (walk.reuse_page) {
+		copy_page(page_to_virt(walk.reuse_page),
+			  (void *)walk.reuse_addr);
+		list_add(&walk.reuse_page->lru, &vmemmap_pages);
+	}
 
 	/*
 	 * In order to make remapping routine most efficient for the huge pages,
......