#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

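/*
 * Sentinel context id: marks a CPU or mm as having no valid MMU
 * context assigned.
 */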
#define MMU_NO_CONTEXT	~0UL

#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>

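/*
 * Tell the generic mm code that this architecture provides its own
 * flush_pmd_tlb_range(). Each helper below dispatches at runtime to
 * the radix or hash MMU implementation via radix_enabled().
 */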
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_pmd_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

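/* Flush user TLB entries for a virtual address range within a VMA. */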
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

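/* Flush TLB entries covering a range of kernel virtual addresses. */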
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_kernel_range(start, end);
	return hash__flush_tlb_kernel_range(start, end);
}

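/*
 * The local_* variants invalidate translations on the calling CPU
 * only; they do not broadcast to other CPUs.
 */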
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_tlb_mm(mm);
	return hash__local_flush_tlb_mm(mm);
}

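/* Flush one user page from the current CPU's TLB only. */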
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_tlb_page(vma, vmaddr);
	return hash__local_flush_tlb_page(vma, vmaddr);
}

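/*
 * Flush one page from the TLB without also touching the hash page
 * table; radix has no hash PTEs, so the plain radix page flush is
 * used there.
 */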
static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page_nohash(vma, vmaddr);
}

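/*
 * Final TLB flush for an mmu_gather batch, invoked when the core mm
 * code finishes a tear-down (e.g. from tlb_flush_mmu()).
 */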
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		return radix__tlb_flush(tlb);
	return hash__tlb_flush(tlb);
}

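/*
 * On SMP the mm-wide and per-page flushes must reach all CPUs; on UP
 * they simply collapse to the local_ variants defined above.
 */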
#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_tlb_mm(mm);
	return hash__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page(vma, vmaddr);
}
#else
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#endif /* CONFIG_SMP */
/*
 * Flush the page walk cache for the address. The page walk cache (PWC)
 * is a radix-MMU structure caching partial translations; the hash MMU
 * has no PWC, so this is a no-op there.
 */
static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	/*
	 * Flush the page table walk cache on freeing a page table. The
	 * upper-level page table entry has already been cleared by this
	 * point, so it is safe to flush the PWC here.
	 */
	if (!radix_enabled())
		return;

	radix__flush_tlb_pwc(tlb, address);
}
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */