diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 90df6ffe3a43140fcbfcb5ceaa312fdb7c642319..3d542a9732ae7ec2c5057e495eabb968814715c7 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -445,11 +445,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		 * necessary anymore if we make hpte_need_flush() get the
		 * page size from the slices
		 */
-		unsigned int psize = get_slice_psize(mm, addr);
-		unsigned int shift = mmu_psize_to_shift(psize);
-		unsigned long sz = ((1UL) << shift);
-		struct hstate *hstate = size_to_hstate(sz);
-		pte_update(mm, addr & hstate->mask, ptep, ~0UL, 1);
+		pte_update(mm, addr, ptep, ~0UL, 1);
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
 }
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 2b2f35f6985e561aaacf2ef515e654d0ba1985b4..282d9306361f58f3ba3443648f73dd6955a75fd4 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -53,11 +53,6 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 
	i = batch->index;
 
-	/* We mask the address for the base page size. Huge pages will
-	 * have applied their own masking already
-	 */
-	addr &= PAGE_MASK;
-
	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in 4K environment like
@@ -75,6 +70,9 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
	} else
		psize = pte_pagesize_index(mm, addr, pte);
 
+	/* Mask the address for the correct page size */
+	addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
+
	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
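
For reference, a minimal sketch (userspace C, not kernel code) of the masking
arithmetic the new hpte_need_flush() hunk performs. The shift values below are
assumptions standing in for mmu_psize_defs[psize].shift (12 for 4K base pages,
24 for 16M huge pages); only the bitwise rounding is the point:

	#include <stdio.h>

	int main(void)
	{
		unsigned long addr = 0x10f2345678UL;
		unsigned int base_shift = 12;	/* assumed: 4K base page */
		unsigned int huge_shift = 24;	/* assumed: 16M huge page */

		/* old behaviour: always round down to a base-page boundary */
		unsigned long old_masked = addr & ~((1UL << base_shift) - 1);

		/* new behaviour: round down by the mapping's own page size */
		unsigned long new_masked = addr & ~((1UL << huge_shift) - 1);

		printf("base-page masking: 0x%lx\n", old_masked); /* 0x10f2345000 */
		printf("huge-page masking: 0x%lx\n", new_masked); /* 0x10f2000000 */
		return 0;
	}

For a huge-page address, masking with the base-page mask alone leaves the
low-order bits inside the huge page set; deriving the mask from the mapping's
own page-size shift rounds the address down to the boundary of the mapping
actually being flushed, which is why the masking now happens only after psize
is known.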