Commit 0506c31d, authored by Baolin Wang, committed by akpm

mm: rmap: simplify the hugetlb handling when unmapping or migration

As noted in a previous discussion [1], there are too many levels of
indentation in the hugetlb handling when unmapping or migrating.  We can
save one level of indentation by combining the folio_test_anon() and
huge_pmd_unshare() checks: add a local variable for the anon test and
move the VM_BUG_ON() a little earlier.

No intended functional changes in this patch.

[1] https://lore.kernel.org/all/0b986dc4-5843-3e2d-c2df-5a2e9f13e6ab@oracle.com/

Link: https://lkml.kernel.org/r/28414b1b96f095e838c1e548074f8e0fc70d78cf.1655724713.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Parent: f7cc67ae
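
To see the shape of the change in isolation, here is a minimal standalone
C sketch (an illustration only, not kernel code; unmap_before, unmap_after,
unshare_pmd, file_backed and rmap_locked are made-up names standing in for
the real folio/huge_pmd helpers): hoist the predicate into a local variable,
fold it into the assertion, and merge the two nested ifs, which removes one
indentation level.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool unshare_pmd(void) { return false; }     /* stand-in helper */

/* Before: the file-backed case adds an extra nesting level. */
static void unmap_before(bool file_backed, bool rmap_locked)
{
	if (file_backed) {
		assert(rmap_locked);                /* stands in for VM_BUG_ON() */
		if (unshare_pmd())
			return;                     /* early exit               */
	}
	puts("unmap the page");                     /* common unmap path        */
}

/* After: local variable + combined conditions, one level shallower. */
static void unmap_after(bool file_backed, bool rmap_locked)
{
	bool anon = !file_backed;

	assert(!(!anon && !rmap_locked));           /* assertion moved earlier  */
	if (!anon && unshare_pmd())
		return;
	puts("unmap the page");
}

int main(void)
{
	unmap_before(true, true);   /* file-backed mapping, rmap lock held */
	unmap_after(true, true);    /* same behaviour, less indentation    */
	return 0;
}

The commit's actual diff to mm/rmap.c follows.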
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1537,6 +1537,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 					 PageAnonExclusive(subpage);
 
 		if (folio_test_hugetlb(folio)) {
+			bool anon = folio_test_anon(folio);
+
 			/*
 			 * The try_to_unmap() is only passed a hugetlb page
 			 * in the case where the hugetlb page is poisoned.
@@ -1551,31 +1553,28 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			 */
 			flush_cache_range(vma, range.start, range.end);
 
-			if (!folio_test_anon(folio)) {
-				/*
-				 * To call huge_pmd_unshare, i_mmap_rwsem must be
-				 * held in write mode.  Caller needs to explicitly
-				 * do this outside rmap routines.
-				 */
-				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
-
-				if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
-					flush_tlb_range(vma, range.start, range.end);
-					mmu_notifier_invalidate_range(mm, range.start,
-								      range.end);
-
-					/*
-					 * The ref count of the PMD page was dropped
-					 * which is part of the way map counting
-					 * is done for shared PMDs.  Return 'true'
-					 * here.  When there is no other sharing,
-					 * huge_pmd_unshare returns false and we will
-					 * unmap the actual page and drop map count
-					 * to zero.
-					 */
-					page_vma_mapped_walk_done(&pvmw);
-					break;
-				}
+			/*
+			 * To call huge_pmd_unshare, i_mmap_rwsem must be
+			 * held in write mode.  Caller needs to explicitly
+			 * do this outside rmap routines.
+			 */
+			VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
+
+			if (!anon && huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
+				flush_tlb_range(vma, range.start, range.end);
+				mmu_notifier_invalidate_range(mm, range.start,
+							      range.end);
+				/*
+				 * The ref count of the PMD page was dropped
+				 * which is part of the way map counting
+				 * is done for shared PMDs.  Return 'true'
+				 * here.  When there is no other sharing,
+				 * huge_pmd_unshare returns false and we will
+				 * unmap the actual page and drop map count
+				 * to zero.
+				 */
+				page_vma_mapped_walk_done(&pvmw);
+				break;
 			}
 			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
 		} else {
@@ -1906,6 +1905,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 					 PageAnonExclusive(subpage);
 
 		if (folio_test_hugetlb(folio)) {
+			bool anon = folio_test_anon(folio);
+
 			/*
 			 * huge_pmd_unshare may unmap an entire PMD page.
 			 * There is no way of knowing exactly which PMDs may
@@ -1915,31 +1916,28 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			 */
 			flush_cache_range(vma, range.start, range.end);
 
-			if (!folio_test_anon(folio)) {
-				/*
-				 * To call huge_pmd_unshare, i_mmap_rwsem must be
-				 * held in write mode.  Caller needs to explicitly
-				 * do this outside rmap routines.
-				 */
-				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
-
-				if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
-					flush_tlb_range(vma, range.start, range.end);
-					mmu_notifier_invalidate_range(mm, range.start,
-								      range.end);
-
-					/*
-					 * The ref count of the PMD page was dropped
-					 * which is part of the way map counting
-					 * is done for shared PMDs.  Return 'true'
-					 * here.  When there is no other sharing,
-					 * huge_pmd_unshare returns false and we will
-					 * unmap the actual page and drop map count
-					 * to zero.
-					 */
-					page_vma_mapped_walk_done(&pvmw);
-					break;
-				}
+			/*
+			 * To call huge_pmd_unshare, i_mmap_rwsem must be
+			 * held in write mode.  Caller needs to explicitly
+			 * do this outside rmap routines.
+			 */
+			VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
+
+			if (!anon && huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
+				flush_tlb_range(vma, range.start, range.end);
+				mmu_notifier_invalidate_range(mm, range.start,
+							      range.end);
+				/*
+				 * The ref count of the PMD page was dropped
+				 * which is part of the way map counting
+				 * is done for shared PMDs.  Return 'true'
+				 * here.  When there is no other sharing,
+				 * huge_pmd_unshare returns false and we will
+				 * unmap the actual page and drop map count
+				 * to zero.
+				 */
+				page_vma_mapped_walk_done(&pvmw);
+				break;
 			}
 
 			/* Nuke the hugetlb page table entry */
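
As a companion to the "no intended functional changes" claim, here is a
small standalone C program (again an illustration with hypothetical stubs
old_form, new_form and struct outcome, not the kernel helpers) that
exhaustively compares the old nested control flow with the new combined
condition over all eight input combinations; both forms assert and bail
out in exactly the same cases, since && short-circuits just as the nesting
did.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct outcome {
	bool would_assert;	/* the VM_BUG_ON condition would trigger    */
	bool bails_out;		/* takes the page_vma_mapped_walk_done path */
};

/* Old shape: everything nested under the !anon (file-backed) check. */
static struct outcome old_form(bool anon, bool locked, bool unshared)
{
	struct outcome o = { false, false };

	if (!anon) {
		if (!locked)
			o.would_assert = true;
		if (unshared)
			o.bails_out = true;
	}
	return o;
}

/* New shape: combined conditions, one indentation level less. */
static struct outcome new_form(bool anon, bool locked, bool unshared)
{
	struct outcome o = { false, false };

	if (!anon && !locked)
		o.would_assert = true;
	if (!anon && unshared)		/* && short-circuits, as in the patch */
		o.bails_out = true;
	return o;
}

int main(void)
{
	/* Exhaustively compare both forms over all input combinations. */
	for (int i = 0; i < 8; i++) {
		bool anon = i & 1, locked = i & 2, unshared = i & 4;
		struct outcome a = old_form(anon, locked, unshared);
		struct outcome b = new_form(anon, locked, unshared);

		assert(a.would_assert == b.would_assert);
		assert(a.bails_out == b.bails_out);
	}
	puts("old and new control flow agree in all 8 cases");
	return 0;
}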