diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 419fb9e03447aff8aef55934e89bbd844a28d7e7..f0a7a0320300bae6eca05e27a2083f80ed54282e 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -94,7 +94,7 @@ static inline int split_huge_page(struct page *page)
 void deferred_split_huge_page(struct page *page);
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-		unsigned long address, bool freeze);
+		unsigned long address, bool freeze, struct page *page);
 
 #define split_huge_pmd(__vma, __pmd, __address)				\
 	do {								\
@@ -102,7 +102,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 		if (pmd_trans_huge(*____pmd)				\
 		    || pmd_devmap(*____pmd))				\
 			__split_huge_pmd(__vma, __pmd, __address,	\
-					false);				\
+					false, NULL);			\
 	} while (0)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 55940d17331871b7de478796764d2ef62aef12a4..343a2b7e57aa25f7d02709ff13ab8a5e47c68a87 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2984,7 +2984,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 }
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-		unsigned long address, bool freeze)
+		unsigned long address, bool freeze, struct page *page)
 {
 	spinlock_t *ptl;
 	struct mm_struct *mm = vma->vm_mm;
@@ -2992,8 +2992,17 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
 	ptl = pmd_lock(mm, pmd);
+
+	/*
+	 * If the caller asks to set up migration entries, we need a page to
+	 * check the pmd against, or we can end up replacing the wrong page.
+	 */
+	VM_BUG_ON(freeze && !page);
+	if (page && page != pmd_page(*pmd))
+		goto out;
+
 	if (pmd_trans_huge(*pmd)) {
-		struct page *page = pmd_page(*pmd);
+		page = pmd_page(*pmd);
 		if (PageMlocked(page))
 			clear_page_mlock(page);
 	} else if (!pmd_devmap(*pmd))
 		goto out;
@@ -3020,24 +3029,8 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
 		return;
 
 	pmd = pmd_offset(pud, address);
-	if (!pmd_present(*pmd) || (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)))
-		return;
-	/*
-	 * If caller asks to setup a migration entries, we need a page to check
-	 * pmd against. Otherwise we can end up replacing wrong page.
-	 */
-	VM_BUG_ON(freeze && !page);
-	if (page && page != pmd_page(*pmd))
-		return;
-
-	/*
-	 * Caller holds the mmap_sem write mode or the anon_vma lock,
-	 * so a huge pmd cannot materialize from under us (khugepaged
-	 * holds both the mmap_sem write mode and the anon_vma lock
-	 * write mode).
-	 */
-	__split_huge_pmd(vma, pmd, address, freeze);
+	__split_huge_pmd(vma, pmd, address, freeze, page);
 }
 
 void vma_adjust_trans_huge(struct vm_area_struct *vma,
			     unsigned long start,
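
For reference, a minimal caller sketch; this is hypothetical and not part of the patch. freeze_thp_at() is an invented name, and it assumes split_huge_pmd_address() takes (vma, address, freeze, page) after this change, as the last hunk implies:

	/*
	 * Hypothetical caller, not from this patch: freeze the THP "page"
	 * mapped at "address" in "vma" before splitting it.  Passing the
	 * page lets __split_huge_pmd() recheck, under the pmd lock, that
	 * the pmd still maps that exact page.
	 */
	static void freeze_thp_at(struct vm_area_struct *vma,
				  unsigned long address, struct page *page)
	{
		/* freeze == true requires a non-NULL page (see the VM_BUG_ON). */
		split_huge_pmd_address(vma, address, true, page);
	}

Because the page identity check now runs under pmd_lock() inside __split_huge_pmd(), instead of in split_huge_pmd_address() before the lock is taken, a racing split-and-refault that leaves a different huge page in the pmd turns such a call into a no-op rather than installing migration entries for the wrong page.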