Commit 4781593d authored by Peter Xu, committed by Andrew Morton

mm/hugetlb: unify clearing of RestoreReserve for private pages

A trivial cleanup that moves clearing of RestoreReserve into the anon rmap setup of private hugetlb mappings.  This matches the shared mappings, where we clear the bit only when adding the page into the page cache, rather than spreading the clear around the code paths.
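
The shape of the change, as a minimal standalone sketch (the struct, flag field, and helper names below are toy stand-ins for illustration, not the kernel implementation): the clear now lives in the one helper that every new private (anonymous) hugetlb mapping passes through, mirroring how the shared side clears the bit only when adding to the page cache.

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-in for struct page and its HPageRestoreReserve flag. */
    struct page {
        bool restore_reserve;
    };

    static void clear_restore_reserve(struct page *page)
    {
        page->restore_reserve = false;
    }

    /*
     * After the cleanup: the single helper that all new private hugetlb
     * mappings go through clears the flag itself, so the individual
     * fault paths no longer each repeat the clear before calling it.
     */
    static void add_new_anon_rmap(struct page *page)
    {
        clear_restore_reserve(page);    /* one unified location */
        /* ... set up the anonymous rmap for the page ... */
    }

    int main(void)
    {
        struct page page = { .restore_reserve = true };

        add_new_anon_rmap(&page);
        printf("RestoreReserve cleared: %s\n",
               page.restore_reserve ? "no" : "yes");
        return 0;
    }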

Link: https://lkml.kernel.org/r/20221020193832.776173-1-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Parent: cc03817c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4775,7 +4775,6 @@ hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr
 	hugepage_add_new_anon_rmap(new_page, vma, addr);
 	set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1));
 	hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
-	ClearHPageRestoreReserve(new_page);
 	SetHPageMigratable(new_page);
 }

@@ -5438,8 +5437,6 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 	spin_lock(ptl);
 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
-		ClearHPageRestoreReserve(new_page);
-
 		/* Break COW or unshare */
 		huge_ptep_clear_flush(vma, haddr, ptep);
 		mmu_notifier_invalidate_range(mm, range.start, range.end);
@@ -5734,10 +5731,9 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 	if (!pte_same(huge_ptep_get(ptep), old_pte))
 		goto backout;

-	if (anon_rmap) {
-		ClearHPageRestoreReserve(page);
+	if (anon_rmap)
 		hugepage_add_new_anon_rmap(page, vma, haddr);
-	} else
+	else
 		page_dup_file_rmap(page, true);
 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
 				&& (vma->vm_flags & VM_SHARED)));
@@ -6120,12 +6116,10 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 	if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
 		goto out_release_unlock;

-	if (page_in_pagecache) {
+	if (page_in_pagecache)
 		page_dup_file_rmap(page, true);
-	} else {
-		ClearHPageRestoreReserve(page);
+	else
 		hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
-	}

 	/*
 	 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2571,7 +2571,7 @@ void hugepage_add_new_anon_rmap(struct page *page,
 	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	atomic_set(compound_mapcount_ptr(page), 0);
 	atomic_set(compound_pincount_ptr(page), 0);
-
+	ClearHPageRestoreReserve(page);
 	__page_set_anon_rmap(page, vma, address, 1);
 }
 #endif /* CONFIG_HUGETLB_PAGE */