diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 10eb0d1e31404c2435484d6b1bef491eed5a2646..65037762b1205d4aedf10dac62ba9388f15c5edc 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -10,6 +10,8 @@ static inline int mmu_get_ap(int psize)
 	return mmu_psize_defs[psize].ap;
 }
 
+extern void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+					   unsigned long start, unsigned long end);
 extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
 					 unsigned long end, int psize);
 extern void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index 0790c4e92a64520c55d472aa7d77195df8e746b7..146b269de8fad78d799154cc4d59edb698f7dc3d 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -16,6 +16,16 @@ static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
 	return hash__flush_tlb_range(vma, start, end);
 }
 
+#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+					   unsigned long start,
+					   unsigned long end)
+{
+	if (radix_enabled())
+		return radix__flush_hugetlb_tlb_range(vma, start, end);
+	return hash__flush_tlb_range(vma, start, end);
+}
+
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 1eca0deaf89b0c9683c53aca4eed1117d3d88506..35254a6784561b6f5f70399822cb163f8e59b14d 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -25,6 +25,16 @@ void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long v
 	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
 }
 
+void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start,
+				    unsigned long end)
+{
+	int psize;
+	struct hstate *hstate = hstate_file(vma->vm_file);
+
+	psize = hstate_get_psize(hstate);
+	radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
+}
+
 /*
  * A vairant of hugetlb_get_unmapped_area doing topdown search
  * FIXME!! should we do as x86 does or non hugetlb area does ?
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f904246a8fd5ae29d0d727f9eb1ccad6b9272cca..af2d88253bfcb0a9377dc2a80b21d30c0c3ba92c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3938,6 +3938,14 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	return i ? i : -EFAULT;
 }
 
+#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+/*
+ * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
+ * implement this.
+ */
+#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
+#endif
+
 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 		unsigned long address, unsigned long end, pgprot_t newprot)
 {
@@ -3998,7 +4006,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	 * once we release i_mmap_rwsem, another task can do the final put_page
 	 * and that page table be reused and filled with junk.
 	 */
-	flush_tlb_range(vma, start, end);
+	flush_hugetlb_tlb_range(vma, start, end);
 	mmu_notifier_invalidate_range(mm, start, end);
 	i_mmap_unlock_write(vma->vm_file->f_mapping);
 	mmu_notifier_invalidate_range_end(mm, start, end);
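
The pattern the patch relies on is the usual weak-default-macro override: mm/hugetlb.c falls back to flush_tlb_range() unless the architecture defines __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE and supplies its own flush_hugetlb_tlb_range(), as the powerpc book3s64 header now does. Below is a minimal standalone userspace sketch of that dispatch, not part of the patch; arch_flush_hugetlb_tlb_range() is a hypothetical stand-in for the radix/hash helpers and the vma argument is dropped for brevity.

	#include <stdio.h>

	/*
	 * "Arch header" part of the sketch: defining the __HAVE_ARCH_* marker
	 * and its own macro suppresses the generic fallback below. Comment
	 * out these three definitions to exercise the fallback path instead.
	 */
	#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
	static void arch_flush_hugetlb_tlb_range(unsigned long start, unsigned long end)
	{
		printf("arch-specific hugetlb flush %#lx-%#lx\n", start, end);
	}
	#define flush_hugetlb_tlb_range(start, end) \
		arch_flush_hugetlb_tlb_range(start, end)

	/* "Generic" part, mirroring the #ifndef fallback added to mm/hugetlb.c. */
	#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
	#define flush_hugetlb_tlb_range(start, end) \
		printf("generic flush_tlb_range %#lx-%#lx\n", (start), (end))
	#endif

	int main(void)
	{
		/* Caller such as hugetlb_change_protection() uses one name either way. */
		flush_hugetlb_tlb_range(0x10000000UL, 0x10200000UL);
		return 0;
	}

In the patch itself the arch-specific path additionally chooses between radix__flush_hugetlb_tlb_range() and hash__flush_tlb_range() at runtime via radix_enabled(), with the radix variant flushing by the hugepage's actual page size (hstate_get_psize()) rather than the base page size.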