Commit 785373b4 authored by Linus Torvalds

Revert "rmap: do not call mmu_notifier_invalidate_page() under ptl"

This reverts commit aac2fea9.

It turns out that that patch was complete and utter garbage, and broke
KVM, resulting in odd oopses.

Quoting Andrea Arcangeli:
 "The aforementioned commit has 3 bugs.

  1) mmu_notifier_invalidate_range cannot be used in replacement of
     mmu_notifier_invalidate_range_start/end.

     For KVM mmu_notifier_invalidate_range is a noop and rightfully so.

     A MMU notifier implementation has to implement either
     ->invalidate_range method or the invalidate_range_start/end
     methods, not both. And if you implement invalidate_range_start/end
     like KVM is forced to do, calling mmu_notifier_invalidate_range in
     common code is a noop for KVM.

     For those MMU notifiers that can get away only implementing
     ->invalidate_range, the ->invalidate_range is implicitly called by
     mmu_notifier_invalidate_range_end(). And only those secondary MMUs
     that share the same pagetable with the primary MMU (like AMD
     iommuv2) can get away only implementing ->invalidate_range.

     So all cases (THP on/off) are broken right now.

     To fix this it is enough to replace mmu_notifier_invalidate_range with
     mmu_notifier_invalidate_range_start;mmu_notifier_invalidate_range_end.
     Either that or call multiple mmu_notifier_invalidate_page like
     before.

  2) address + (1UL << compound_order(page)) is buggy; it should be
     PAGE_SIZE << compound_order(page). It's bytes, not pages: 2M, not
     512.

  3) The whole invalidate_range thing was an attempt to call a single
     invalidate while walking multiple 4k ptes that map the same THP
     (after a pmd virtual split without physical compound page THP
     split).

     It's unclear if the rmap_walk will always provide an address that
     is 2M aligned as a parameter to try_to_unmap_one in the presence of
     THP. I think it also needs an address &= (PAGE_SIZE <<
     compound_order(page)) - 1 to be safe"
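
To make point 1 concrete, below is a minimal user-space model of the callback contract. It is not kernel code: struct fake_notifier_ops and the fake_* helpers are illustrative stand-ins for mmu_notifier_ops and the mmu_notifier_invalidate_range*() calls. A notifier that, like KVM, implements only the start/end pair receives nothing from a bare range call, while the start/end bracket does notify it:

#include <stdio.h>

/* Illustrative stand-in for struct mmu_notifier_ops (not the real kernel type). */
struct fake_notifier_ops {
        /* for secondary MMUs sharing the primary page table (e.g. AMD iommuv2 style) */
        void (*invalidate_range)(unsigned long start, unsigned long end);
        /* for secondary MMUs with their own page tables (KVM style) */
        void (*invalidate_range_start)(unsigned long start, unsigned long end);
        void (*invalidate_range_end)(unsigned long start, unsigned long end);
};

static void kvm_like_start(unsigned long s, unsigned long e)
{
        printf("start: stop faulting in and drop shadow mappings for [%#lx, %#lx)\n", s, e);
}

static void kvm_like_end(unsigned long s, unsigned long e)
{
        printf("end:   allow shadow mappings to be rebuilt for       [%#lx, %#lx)\n", s, e);
}

/* A KVM-like registration: only the start/end pair is implemented. */
static struct fake_notifier_ops kvm_like = {
        .invalidate_range_start = kvm_like_start,
        .invalidate_range_end   = kvm_like_end,
};

/* Models a bare mmu_notifier_invalidate_range(): calls only ->invalidate_range. */
static void fake_invalidate_range(const struct fake_notifier_ops *ops,
                                  unsigned long start, unsigned long end)
{
        if (ops->invalidate_range)
                ops->invalidate_range(start, end);
}

/* Models the invalidate_range_start/end bracket around a page table change. */
static void fake_invalidate_bracket(const struct fake_notifier_ops *ops,
                                    unsigned long start, unsigned long end)
{
        if (ops->invalidate_range_start)
                ops->invalidate_range_start(start, end);
        /* ... the primary MMU page tables would be modified here ... */
        if (ops->invalidate_range_end)
                ops->invalidate_range_end(start, end);
}

int main(void)
{
        /* Prints nothing: the KVM-like user never hears about the change. */
        fake_invalidate_range(&kvm_like, 0x200000, 0x400000);
        /* Prints both lines: this is the bracket the reverted patch needed. */
        fake_invalidate_bracket(&kvm_like, 0x200000, 0x400000);
        return 0;
}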
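
For points 2 and 3, a small arithmetic sketch, assuming 4 KiB base pages, an order-9 (2 MiB) THP and a made-up pte address (none of these values are taken from kernel headers). It shows why the length must be in bytes, and why the start wants to be the THP-aligned base rather than whichever subpage address rmap_walk happened to pass in:

#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096;            /* assumed PAGE_SIZE */
        unsigned long order = 9;                   /* compound_order() of a 2 MiB THP */
        unsigned long address = 0x7f1234569000UL;  /* hypothetical pte address inside the THP */

        /* Point 2: 1UL << order is a page count (512), not a byte length. */
        printf("buggy length:   %lu (page count, wrongly used as bytes)\n", 1UL << order);
        printf("correct length: %lu bytes (2 MiB)\n", page_size << order);

        /*
         * Point 3: the invalidation should start at the 2 MiB-aligned base
         * of the THP; aligning the passed-in address down is one way there.
         */
        unsigned long thp_size = page_size << order;
        unsigned long start = address & ~(thp_size - 1);

        printf("pte at %#lx -> invalidate [%#lx, %#lx)\n",
               address, start, start + thp_size);
        return 0;
}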

In general, we should stop making excuses for horrible MMU notifier
users.  It's much more important that the core VM is sane and safe, than
letting MMU notifiers sleep.

So if some MMU notifier is sleeping under a spinlock, we need to fix the
notifier, not try to make excuses for that garbage in the core VM.

Reported-and-tested-by: Bernhard Held <berny156@gmx.de>
Reported-and-tested-by: Adam Borowski <kilobyte@angband.pl>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Wanpeng Li <kernellwp@gmail.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Takashi Iwai <tiwai@suse.de>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: axie <axie@amd.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 9c3a815f
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -888,10 +888,10 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                 .flags = PVMW_SYNC,
         };
         int *cleaned = arg;
-        bool invalidation_needed = false;
 
         while (page_vma_mapped_walk(&pvmw)) {
                 int ret = 0;
+                address = pvmw.address;
                 if (pvmw.pte) {
                         pte_t entry;
                         pte_t *pte = pvmw.pte;
@@ -899,11 +899,11 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                         if (!pte_dirty(*pte) && !pte_write(*pte))
                                 continue;
 
-                        flush_cache_page(vma, pvmw.address, pte_pfn(*pte));
-                        entry = ptep_clear_flush(vma, pvmw.address, pte);
+                        flush_cache_page(vma, address, pte_pfn(*pte));
+                        entry = ptep_clear_flush(vma, address, pte);
                         entry = pte_wrprotect(entry);
                         entry = pte_mkclean(entry);
-                        set_pte_at(vma->vm_mm, pvmw.address, pte, entry);
+                        set_pte_at(vma->vm_mm, address, pte, entry);
                         ret = 1;
                 } else {
 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
@@ -913,11 +913,11 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                         if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
                                 continue;
 
-                        flush_cache_page(vma, pvmw.address, page_to_pfn(page));
-                        entry = pmdp_huge_clear_flush(vma, pvmw.address, pmd);
+                        flush_cache_page(vma, address, page_to_pfn(page));
+                        entry = pmdp_huge_clear_flush(vma, address, pmd);
                         entry = pmd_wrprotect(entry);
                         entry = pmd_mkclean(entry);
-                        set_pmd_at(vma->vm_mm, pvmw.address, pmd, entry);
+                        set_pmd_at(vma->vm_mm, address, pmd, entry);
                         ret = 1;
 #else
                         /* unexpected pmd-mapped page? */
@@ -926,16 +926,11 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                 }
 
                 if (ret) {
+                        mmu_notifier_invalidate_page(vma->vm_mm, address);
                         (*cleaned)++;
-                        invalidation_needed = true;
                 }
         }
 
-        if (invalidation_needed) {
-                mmu_notifier_invalidate_range(vma->vm_mm, address,
-                                address + (1UL << compound_order(page)));
-        }
-
         return true;
 }
@@ -1328,7 +1323,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
         };
         pte_t pteval;
         struct page *subpage;
-        bool ret = true, invalidation_needed = false;
+        bool ret = true;
         enum ttu_flags flags = (enum ttu_flags)arg;
 
         /* munlock has nothing to gain from examining un-locked vmas */
@@ -1368,9 +1363,11 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                 VM_BUG_ON_PAGE(!pvmw.pte, page);
 
                 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
+                address = pvmw.address;
+
                 if (!(flags & TTU_IGNORE_ACCESS)) {
-                        if (ptep_clear_flush_young_notify(vma, pvmw.address,
+                        if (ptep_clear_flush_young_notify(vma, address,
                                                 pvmw.pte)) {
                                 ret = false;
                                 page_vma_mapped_walk_done(&pvmw);
@@ -1379,7 +1376,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                 }
 
                 /* Nuke the page table entry. */
-                flush_cache_page(vma, pvmw.address, pte_pfn(*pvmw.pte));
+                flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
                 if (should_defer_flush(mm, flags)) {
                         /*
                          * We clear the PTE but do not flush so potentially
@@ -1389,12 +1386,11 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                          * transition on a cached TLB entry is written through
                          * and traps if the PTE is unmapped.
                          */
-                        pteval = ptep_get_and_clear(mm, pvmw.address,
-                                                    pvmw.pte);
+                        pteval = ptep_get_and_clear(mm, address, pvmw.pte);
 
                         set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
                 } else {
-                        pteval = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
+                        pteval = ptep_clear_flush(vma, address, pvmw.pte);
                 }
 
                 /* Move the dirty bit to the page. Now the pte is gone. */
@@ -1409,12 +1405,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         if (PageHuge(page)) {
                                 int nr = 1 << compound_order(page);
 
                                 hugetlb_count_sub(nr, mm);
-                                set_huge_swap_pte_at(mm, pvmw.address,
+                                set_huge_swap_pte_at(mm, address,
                                                      pvmw.pte, pteval,
                                                      vma_mmu_pagesize(vma));
                         } else {
                                 dec_mm_counter(mm, mm_counter(page));
-                                set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
+                                set_pte_at(mm, address, pvmw.pte, pteval);
                         }
                 } else if (pte_unused(pteval)) {
@@ -1438,7 +1434,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         swp_pte = swp_entry_to_pte(entry);
                         if (pte_soft_dirty(pteval))
                                 swp_pte = pte_swp_mksoft_dirty(swp_pte);
-                        set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
+                        set_pte_at(mm, address, pvmw.pte, swp_pte);
                 } else if (PageAnon(page)) {
                         swp_entry_t entry = { .val = page_private(subpage) };
                         pte_t swp_pte;
@@ -1464,7 +1460,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                  * If the page was redirtied, it cannot be
                                  * discarded. Remap the page to page table.
                                  */
-                                set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
+                                set_pte_at(mm, address, pvmw.pte, pteval);
                                 SetPageSwapBacked(page);
                                 ret = false;
                                 page_vma_mapped_walk_done(&pvmw);
@@ -1472,7 +1468,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         }
 
                         if (swap_duplicate(entry) < 0) {
-                                set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
+                                set_pte_at(mm, address, pvmw.pte, pteval);
                                 ret = false;
                                 page_vma_mapped_walk_done(&pvmw);
                                 break;
@@ -1488,18 +1484,14 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         swp_pte = swp_entry_to_pte(entry);
                         if (pte_soft_dirty(pteval))
                                 swp_pte = pte_swp_mksoft_dirty(swp_pte);
-                        set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
+                        set_pte_at(mm, address, pvmw.pte, swp_pte);
                 } else
                         dec_mm_counter(mm, mm_counter_file(page));
 discard:
                 page_remove_rmap(subpage, PageHuge(page));
                 put_page(page);
-                invalidation_needed = true;
+                mmu_notifier_invalidate_page(mm, address);
         }
 
-        if (invalidation_needed)
-                mmu_notifier_invalidate_range(mm, address,
-                                address + (1UL << compound_order(page)));
-
         return ret;
 }