/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>

/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macro below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg) asm ("tlbi " #op "\n"				       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op,	       \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_QCOM_FALKOR_ERRATUM_1009)		       \
			    : : )

#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n"			       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op ", %0",     \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_QCOM_FALKOR_ERRATUM_1009)		       \
			    : : "r" (arg))

#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)

#define __tlbi_user(op, arg) do {						\
	if (arm64_kernel_unmapped_at_el0())					\
		__tlbi(op, (arg) | USER_ASID_FLAG);				\
} while (0)
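
/*
 * Example (illustrative): invoking the raw macros. Here "addr" is
 * assumed to be a VA/ASID operand built with __TLBI_VADDR() below;
 * most code should use the flush_tlb_*() helpers later in this header
 * instead.
 *
 *	__tlbi(vmalle1is);		TLBI op with no register operand
 *	__tlbi(vale1is, addr);		TLBI op taking a register operand
 *	__tlbi_user(vale1is, addr);	repeated for the user ASID when the
 *					kernel is unmapped at EL0 (KPTI)
 */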

/* This macro creates a properly formatted VA operand for the TLBI */
#define __TLBI_VADDR(addr, asid)				\
	({							\
		unsigned long __ta = (addr) >> 12;		\
		__ta &= GENMASK_ULL(43, 0);			\
		__ta |= (unsigned long)(asid) << 48;		\
		__ta;						\
	})
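
/*
 * Worked example (illustrative): the operand packs VA[55:12] into bits
 * [43:0] and the ASID into bits [63:48]. Assuming 4KB pages, a user
 * address of 0x0000ffffb0000000 with ASID 5 encodes as
 *
 *	(0x0000ffffb0000000 >> 12) | (5UL << 48) == 0x0005000ffffb0000
 *
 * Because the address is pre-shifted by 12, the range loops below step
 * the operand by 1 << (PAGE_SHIFT - 12), i.e. one page per iteration
 * (a stride of 1 with 4KB pages, 16 with 64KB pages).
 */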

/*
 *	TLB Management
 *	==============
 *
 *	The TLB specific code is expected to perform whatever tests it needs
 *	to determine if it should invalidate the TLB for each call.  Start
 *	addresses are inclusive and end addresses are exclusive; it is safe to
 *	round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified address
 *		space.
 *		- vma	- vma_struct describing the address space
 *		- start - start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vma,vaddr)
 *
 *		Invalidate the specified page in the specified address space.
 *		- vma	- vma_struct describing the address space
 *		- vaddr - virtual address (may not be aligned)
 *
 *	flush_tlb_kernel_range(start,end)
 *
 *		Invalidate the TLB entries for the specified range of kernel
 *		virtual addresses.
 *		- start - start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 */
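
/*
 * Example (illustrative): a hypothetical caller that has just updated a
 * user PTE, and one that has changed an entire mapping, might do
 *
 *	set_pte_at(vma->vm_mm, addr, ptep, pte);
 *	flush_tlb_page(vma, addr);
 *
 *	flush_tlb_range(vma, vma->vm_start, vma->vm_end);
 *
 * matching the interfaces documented above.
 */
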
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}

static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}
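
/*
 * Note on the two flushes above: local_flush_tlb_all() uses the
 * non-broadcast TLBI VMALLE1 with non-shareable (nsh) barriers, so it
 * only affects the calling CPU, whereas flush_tlb_all() uses the Inner
 * Shareable variant (VMALLE1IS) with ish barriers so the invalidation
 * is broadcast to all CPUs in the Inner Shareable domain.
 */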

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = __TLBI_VADDR(0, ASID(mm));

	dsb(ishst);
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));

	dsb(ishst);
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
	dsb(ish);
}

/*
 * This limit is meant to avoid soft lock-ups on large TLB flushing ranges;
 * it is not necessarily a performance improvement.
 */
#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
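
/*
 * Worked example (illustrative): with 4KB pages (PAGE_SHIFT == 12),
 * MAX_TLB_RANGE is 1024 pages, i.e. 4MB (64MB with 64KB pages). Ranges
 * larger than this are handled by flushing the whole ASID or the whole
 * TLB rather than issuing thousands of per-page TLBI operations.
 */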

static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     bool last_level)
{
	unsigned long asid = ASID(vma->vm_mm);
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	start = __TLBI_VADDR(start, asid);
	end = __TLBI_VADDR(end, asid);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
		if (last_level) {
			__tlbi(vale1is, addr);
			__tlbi_user(vale1is, addr);
		} else {
			__tlbi(vae1is, addr);
			__tlbi_user(vae1is, addr);
		}
	}
	dsb(ish);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__flush_tlb_range(vma, start, end, false);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_all();
		return;
	}

	start = __TLBI_VADDR(start, 0);
	end = __TLBI_VADDR(end, 0);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}

/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_pgtable(struct mm_struct *mm,
				       unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm));

	__tlbi(vae1is, addr);
	__tlbi_user(vae1is, addr);
	dsb(ish);
}
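
/*
 * Example (illustrative): a hypothetical caller tearing down a page
 * table page that was reachable from an active pmd might do
 *
 *	pmd_clear(pmdp);
 *	__flush_tlb_pgtable(mm, addr);
 *
 * so that stale walk-cache entries for the old table are removed before
 * the page is freed or reused.
 */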

static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
	unsigned long addr = __TLBI_VADDR(kaddr, 0);

	__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}

#endif

#endif