/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <asm/cputype.h>

/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macro below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg)		asm ("tlbi " #op)
#define __TLBI_1(op, arg)		asm ("tlbi " #op ", %0" : : "r" (arg))
#define __TLBI_N(op, arg, n, ...)	__TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
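
/*
 * For illustration, the argument-counting trick above expands as:
 *
 *	__tlbi(vmalle1is)	->	asm ("tlbi vmalle1is")
 *	__tlbi(vale1is, addr)	->	asm ("tlbi vale1is, %0" : : "r" (addr))
 *
 * i.e. ##__VA_ARGS__ shifts the trailing 1/0 arguments so that
 * __TLBI_N selects __TLBI_1 when a register argument is present and
 * __TLBI_0 when it is not.
 */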

/*
 *	TLB Management
 *	==============
 *
 *	The TLB specific code is expected to perform whatever tests it needs
 *	to determine if it should invalidate the TLB for each call.  Start
 *	addresses are inclusive and end addresses are exclusive; it is safe to
 *	round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified address
 *		space.
 *		- vma	- vm_area_struct describing the address range
 *		- start - start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vma,vaddr)
 *
 *		Invalidate the specified page in the specified address space.
 *		- vma	- vm_area_struct describing the address space
 *		- vaddr	- virtual address (may not be aligned)
 *
 *	flush_tlb_kernel_range(start,end)
 *
 *		Invalidate a range of TLB entries for kernel mappings.
 *		Addresses are in the kernel's virtual memory space.
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 */
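
/*
 * A sketch of typical usage (the call site is illustrative, not taken
 * from this file): after updating a user PTE, the stale TLB entry for
 * that page is dropped with something like
 *
 *	set_pte_at(vma->vm_mm, uaddr, ptep, pte);
 *	flush_tlb_page(vma, uaddr);
 */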
static inline void local_flush_tlb_all(void)
{
	/* Make prior page table updates visible before invalidating. */
	dsb(nshst);
	/* Invalidate all stage 1 TLB entries on this CPU only. */
	__tlbi(vmalle1);
	/* Wait for completion, then resynchronize the instruction stream. */
	dsb(nsh);
	isb();
}
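
/*
 * The non-shareable (nsh*) barriers and the non-broadcast vmalle1
 * operation make this a single-CPU flush, suited to callers that
 * deliberately operate on the local CPU only (the ASID rollover
 * handling in the arm64 mm code is one such path).
 */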

static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}
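
/*
 * Unlike local_flush_tlb_all(), the *is ("Inner Shareable") TLBI
 * variants used below broadcast the invalidation to all CPUs in the
 * Inner Shareable domain, and the ish barriers wait for every CPU to
 * complete it.
 */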

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = ASID(mm) << 48;

	dsb(ishst);
	__tlbi(aside1is, asid);
	dsb(ish);
}
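
/*
 * TLBI-by-address operations encode their register argument as
 * VA[55:12] in bits [43:0], with the ASID in bits [63:48]; hence the
 * "uaddr >> 12 | (ASID(...) << 48)" packing used below.
 */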

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);

	dsb(ishst);
	__tlbi(vale1is, addr);
	dsb(ish);
}

/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges,
 * not necessarily to improve performance.
 */
#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
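
/*
 * Worked example: with 4KB pages (PAGE_SHIFT == 12) this is
 * 1024 << 12 = 4MB; ranges larger than that are flushed by ASID (or
 * entirely, for kernel ranges) instead of page by page.
 */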

static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     bool last_level)
{
	unsigned long asid = ASID(vma->vm_mm) << 48;
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	start = asid | (start >> 12);
	end = asid | (end >> 12);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
		if (last_level)
			__tlbi(vale1is, addr);	/* leaf entries only */
		else
			__tlbi(vae1is, addr);	/* all levels, incl. walk caches */
	}
	dsb(ish);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__flush_tlb_range(vma, start, end, false);
}
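
/*
 * flush_tlb_range() passes last_level = false so that it stays safe
 * even when intermediate page table levels have changed; a caller
 * that knows only leaf (PTE) entries were modified could use
 * __flush_tlb_range(vma, start, end, true) to leave the walk caches
 * intact.
 */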

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_all();
		return;
	}

	start >>= 12;
	end >>= 12;

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaae1is, addr);	/* by VA, all ASIDs: kernel mappings are global */
	dsb(ish);
	isb();
}

/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_pgtable(struct mm_struct *mm,
				       unsigned long uaddr)
{
	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);

	__tlbi(vae1is, addr);
	dsb(ish);
}
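
/*
 * Sketch of intended use (the caller below is hypothetical, for
 * exposition only): after a table entry is cleared, the cached walk
 * entry for it must be dropped too, e.g.
 *
 *	pmd_clear(pmdp);
 *	__flush_tlb_pgtable(mm, addr);
 */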

#endif

#endif