提交 da146769 编写于 作者: K Kirill A. Shutemov 提交者: Linus Torvalds

thp: fix zap_huge_pmd() for DAX

The original DAX code assumed that pgtable_t was a pointer, which isn't
true on all architectures.  Restructure the code to not rely on that
assumption.

[willy@linux.intel.com: further fixes integrated into this patch]
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 5b701b84
@@ -1456,50 +1456,41 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
/*
 * Tear down one huge (transparent) PMD mapping at @addr.
 *
 * Returns 1 if a huge pmd was cleared, 0 if the pmd was not huge (the
 * caller then falls back to the regular pte-level zap path).
 *
 * Note: the pgtable_t withdraw is done inline in each branch rather than
 * stored in a local, because pgtable_t is not a pointer type on every
 * architecture (e.g. s390) — it cannot be used as a "NULL means none"
 * flag. DAX mappings never deposit a page table, so no withdraw happens
 * on that path.
 */
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pmd_t *pmd, unsigned long addr)
{
	pmd_t orig_pmd;
	spinlock_t *ptl;

	if (__pmd_trans_huge_lock(pmd, vma, &ptl) != 1)
		return 0;
	/*
	 * For architectures like ppc64 we look at deposited pgtable
	 * when calling pmdp_huge_get_and_clear. So do the
	 * pgtable_trans_huge_withdraw after finishing pmdp related
	 * operations.
	 */
	orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
			tlb->fullmm);
	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	if (vma_is_dax(vma)) {
		/* DAX: no deposited page table and no struct page to free. */
		spin_unlock(ptl);
		if (is_huge_zero_pmd(orig_pmd))
			put_huge_zero_page();
	} else if (is_huge_zero_pmd(orig_pmd)) {
		/* Anonymous huge zero page: free the deposited pgtable and
		 * drop the zero-page reference; no rmap/counter updates. */
		pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
		atomic_long_dec(&tlb->mm->nr_ptes);
		spin_unlock(ptl);
		put_huge_zero_page();
	} else {
		/* Regular anonymous THP: unmap, adjust counters, and hand
		 * the compound page to the mmu_gather for deferred free. */
		struct page *page = pmd_page(orig_pmd);
		page_remove_rmap(page);
		VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
		add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
		VM_BUG_ON_PAGE(!PageHead(page), page);
		pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
		atomic_long_dec(&tlb->mm->nr_ptes);
		spin_unlock(ptl);
		tlb_remove_page(tlb, page);
	}
	return 1;
}
int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册