/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/tlbflush.h
 *
 *  Copyright (C) 1999-2003 Russell King
 */
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H

#ifndef __ASSEMBLY__
# include <linux/mm_types.h>
#endif

#ifdef CONFIG_MMU

#include <asm/glue.h>

#define TLB_V4_U_PAGE	(1 << 1)
#define TLB_V4_D_PAGE	(1 << 2)
#define TLB_V4_I_PAGE	(1 << 3)
#define TLB_V6_U_PAGE	(1 << 4)
#define TLB_V6_D_PAGE	(1 << 5)
#define TLB_V6_I_PAGE	(1 << 6)

#define TLB_V4_U_FULL	(1 << 9)
#define TLB_V4_D_FULL	(1 << 10)
#define TLB_V4_I_FULL	(1 << 11)
#define TLB_V6_U_FULL	(1 << 12)
#define TLB_V6_D_FULL	(1 << 13)
#define TLB_V6_I_FULL	(1 << 14)

#define TLB_V6_U_ASID	(1 << 16)
#define TLB_V6_D_ASID	(1 << 17)
#define TLB_V6_I_ASID	(1 << 18)

#define TLB_V6_BP	(1 << 19)

/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
#define TLB_V7_UIS_PAGE	(1 << 20)
#define TLB_V7_UIS_FULL (1 << 21)
#define TLB_V7_UIS_ASID (1 << 22)
#define TLB_V7_UIS_BP	(1 << 23)

#define TLB_BARRIER	(1 << 28)
#define TLB_L2CLEAN_FR	(1 << 29)		/* Feroceon */
#define TLB_DCLEAN	(1 << 30)
#define TLB_WB		(1 << 31)

/*
 *	MMU TLB Model
 *	=============
 *
 *	We have the following to choose from:
 *	  v4    - ARMv4 without write buffer
 *	  v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
 *	  v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
 *	  fr    - Feroceon (v4wbi with non-outer-cacheable page table walks)
 *	  fa    - Faraday (v4 with write buffer with UTLB)
 *	  v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
 *	  v7wbi - identical to v6wbi
 */
#undef _TLB
#undef MULTI_TLB

#ifdef CONFIG_SMP_ON_UP
#define MULTI_TLB 1
#endif

#define v4_tlb_flags	(TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_V4WT
# define v4_possible_flags	v4_tlb_flags
# define v4_always_flags	v4_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4
# endif
#else
# define v4_possible_flags	0
# define v4_always_flags	(-1UL)
#endif

#define fa_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
			 TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_FA
# define fa_possible_flags	fa_tlb_flags
# define fa_always_flags	fa_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB fa
# endif
#else
# define fa_possible_flags	0
# define fa_always_flags	(-1UL)
#endif

#define v4wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WBI
# define v4wbi_possible_flags	v4wbi_tlb_flags
# define v4wbi_always_flags	v4wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wbi
# endif
#else
# define v4wbi_possible_flags	0
# define v4wbi_always_flags	(-1UL)
#endif

#define fr_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_L2CLEAN_FR | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_FEROCEON
# define fr_possible_flags	fr_tlb_flags
# define fr_always_flags	fr_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wbi
# endif
#else
# define fr_possible_flags	0
# define fr_always_flags	(-1UL)
#endif

#define v4wb_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WB
# define v4wb_possible_flags	v4wb_tlb_flags
# define v4wb_always_flags	v4wb_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wb
# endif
#else
# define v4wb_possible_flags	0
# define v4wb_always_flags	(-1UL)
#endif

#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
			 TLB_V6_I_FULL | TLB_V6_D_FULL | \
			 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
			 TLB_V6_I_ASID | TLB_V6_D_ASID | \
			 TLB_V6_BP)

#ifdef CONFIG_CPU_TLB_V6
# define v6wbi_possible_flags	v6wbi_tlb_flags
# define v6wbi_always_flags	v6wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v6wbi
# endif
#else
# define v6wbi_possible_flags	0
# define v6wbi_always_flags	(-1UL)
#endif

#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_BARRIER | \
				 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
				 TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
#define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
				 TLB_V6_U_FULL | TLB_V6_U_PAGE | \
				 TLB_V6_U_ASID | TLB_V6_BP)

#ifdef CONFIG_CPU_TLB_V7

# ifdef CONFIG_SMP_ON_UP
#  define v7wbi_possible_flags	(v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up)
#  define v7wbi_always_flags	(v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up)
# elif defined(CONFIG_SMP)
#  define v7wbi_possible_flags	v7wbi_tlb_flags_smp
#  define v7wbi_always_flags	v7wbi_tlb_flags_smp
# else
#  define v7wbi_possible_flags	v7wbi_tlb_flags_up
#  define v7wbi_always_flags	v7wbi_tlb_flags_up
# endif
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v7wbi
# endif
#else
# define v7wbi_possible_flags	0
# define v7wbi_always_flags	(-1UL)
#endif

#ifndef _TLB
#error Unknown TLB model
#endif

#ifndef __ASSEMBLY__

#include <linux/sched.h>

struct cpu_tlb_fns {
	void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
	void (*flush_kern_range)(unsigned long, unsigned long);
	unsigned long tlb_flags;
};

/*
 * Select the calling method
 */
#ifdef MULTI_TLB

#define __cpu_flush_user_tlb_range	cpu_tlb.flush_user_range
#define __cpu_flush_kern_tlb_range	cpu_tlb.flush_kern_range

#else

#define __cpu_flush_user_tlb_range	__glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_kern_tlb_range	__glue(_TLB,_flush_kern_tlb_range)

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

#endif

extern struct cpu_tlb_fns cpu_tlb;

#define __cpu_tlb_flags			cpu_tlb.tlb_flags
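
/*
 * Dispatch sketch (illustration only, not part of the upstream header),
 * assuming a single-TLB build where _TLB is v7wbi and MULTI_TLB is unset:
 *
 *	local_flush_tlb_range(vma, start, end)
 *	  -> __cpu_flush_user_tlb_range(start, end, vma)
 *	  -> v7wbi_flush_user_tlb_range(start, end, vma)	(tlb-v7.S)
 *
 * With MULTI_TLB, the same call instead goes indirect through the cpu_tlb
 * function table, which the boot-time processor setup points at the
 * detected CPU's TLB routines.
 */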

/*
 *	TLB Management
 *	==============
 *
 *	The arch/arm/mm/tlb-*.S files implement these methods.
 *
 *	The TLB specific code is expected to perform whatever tests it
 *	needs to determine if it should invalidate the TLB for each
 *	call.  Start addresses are inclusive and end addresses are
 *	exclusive; it is safe to round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address
 *		space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified
 *		address space.
 *		- vma	- vm_area_struct describing address space
 *		- start - start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vma, uaddr)
 *
 *		Invalidate the specified page in the specified address range.
 *		- vma	- vm_area_struct describing address range
 *		- vaddr - virtual address (may not be aligned)
 */
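
/*
 * Minimal usage sketch (illustration only, not part of the upstream
 * header): after updating a single user PTE the page is flushed by
 * address, and a whole address space is flushed via its mm:
 *
 *	set_pte_at(vma->vm_mm, addr, ptep, pteval);
 *	flush_tlb_page(vma, addr);
 *
 *	flush_tlb_mm(mm);
 *
 * Changes to kernel mappings use flush_tlb_kernel_page() or
 * flush_tlb_kernel_range() instead.
 */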

/*
 * We optimise the code below by:
 *  - building a set of TLB flags that might be set in __cpu_tlb_flags
 *  - building a set of TLB flags that will always be set in __cpu_tlb_flags
 *  - if we're going to need __cpu_tlb_flags, access it once and only once
 *
 * This allows us to build optimal assembly for the single-CPU type case,
 * and as close to optimal given the compiler constraints for multi-CPU
 * case.  We could do better for the multi-CPU case if the compiler
 * implemented the "%?" method, but this has been discontinued due to too
 * many people getting it wrong.
 */
#define possible_tlb_flags	(v4_possible_flags | \
				 v4wbi_possible_flags | \
				 fr_possible_flags | \
				 v4wb_possible_flags | \
				 fa_possible_flags | \
				 v6wbi_possible_flags | \
				 v7wbi_possible_flags)

#define always_tlb_flags	(v4_always_flags & \
				 v4wbi_always_flags & \
				 fr_always_flags & \
				 v4wb_always_flags & \
				 fa_always_flags & \
				 v6wbi_always_flags & \
				 v7wbi_always_flags)

#define tlb_flag(f)	((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
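
/*
 * Worked example (sketch): on a build with only CONFIG_CPU_TLB_V7 and
 * CONFIG_SMP, both possible_tlb_flags and always_tlb_flags reduce to
 * v7wbi_tlb_flags_smp, so tlb_flag(TLB_V7_UIS_FULL) is the constant 1,
 * tlb_flag(TLB_V4_U_FULL) is the constant 0, and the compiler emits only
 * the v7 MCRs with no runtime test of __cpu_tlb_flags.
 */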

#define __tlb_op(f, insnarg, arg)					\
	do {								\
		if (always_tlb_flags & (f))				\
			asm("mcr " insnarg				\
			    : : "r" (arg) : "cc");			\
		else if (possible_tlb_flags & (f))			\
			asm("tst %1, %2\n\t"				\
			    "mcrne " insnarg				\
			    : : "r" (arg), "r" (__tlb_flag), "Ir" (f)	\
			    : "cc");					\
	} while (0)

#define tlb_op(f, regs, arg)	__tlb_op(f, "p15, 0, %0, " regs, arg)
#define tlb_l2_op(f, regs, arg)	__tlb_op(f, "p15, 1, %0, " regs, arg)
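
/*
 * Expansion sketch (illustration only): for a flag in always_tlb_flags,
 *
 *	tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero)
 *
 * emits a single unconditional "mcr p15, 0, %0, c8, c7, 0", while for a
 * flag that is only in possible_tlb_flags it emits the runtime test
 * "tst %1, %2; mcrne p15, 0, %0, c8, c7, 0" against __tlb_flag.
 */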

static inline void __local_flush_tlb_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
	tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
	tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
}

static inline void local_flush_tlb_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb(nshst);

	__local_flush_tlb_all();
	tlb_op(TLB_V7_UIS_FULL, "c8, c7, 0", zero);

	if (tlb_flag(TLB_BARRIER)) {
		dsb(nsh);
		isb();
	}
}

static inline void __flush_tlb_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb(ishst);

	__local_flush_tlb_all();
	tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);

	if (tlb_flag(TLB_BARRIER)) {
		dsb(ish);
		isb();
	}
}

static inline void __local_flush_tlb_mm(struct mm_struct *mm)
{
	const int zero = 0;
	const int asid = ASID(mm);
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
			tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
			tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
			tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
		}
	}

	tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid);
	tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid);
	tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid);
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	const int asid = ASID(mm);
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb(nshst);

	__local_flush_tlb_mm(mm);
	tlb_op(TLB_V7_UIS_ASID, "c8, c7, 2", asid);

	if (tlb_flag(TLB_BARRIER))
		dsb(nsh);
}

static inline void __flush_tlb_mm(struct mm_struct *mm)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb(ishst);

	__local_flush_tlb_mm(mm);
#ifdef CONFIG_ARM_ERRATA_720789
	tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", 0);
#else
	tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", ASID(mm));
#endif

	if (tlb_flag(TLB_BARRIER))
		dsb(ish);
}

static inline void
__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

	if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
	    cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
		tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
		tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
		if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
	}

	tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr);
	tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr);
	tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr);
}

static inline void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

	if (tlb_flag(TLB_WB))
		dsb(nshst);

	__local_flush_tlb_page(vma, uaddr);
	tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", uaddr);

	if (tlb_flag(TLB_BARRIER))
		dsb(nsh);
}

static inline void
__flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

	if (tlb_flag(TLB_WB))
		dsb(ishst);

	__local_flush_tlb_page(vma, uaddr);
#ifdef CONFIG_ARM_ERRATA_720789
	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK);
#else
	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", uaddr);
#endif

	if (tlb_flag(TLB_BARRIER))
		dsb(ish);
}

static inline void __local_flush_tlb_kernel_page(unsigned long kaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
	tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
	tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
	if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");

	tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr);
	tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr);
	tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr);
}

static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	kaddr &= PAGE_MASK;

	if (tlb_flag(TLB_WB))
		dsb(nshst);

	__local_flush_tlb_kernel_page(kaddr);
	tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", kaddr);

	if (tlb_flag(TLB_BARRIER)) {
		dsb(nsh);
		isb();
	}
}

static inline void __flush_tlb_kernel_page(unsigned long kaddr)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	kaddr &= PAGE_MASK;

	if (tlb_flag(TLB_WB))
		dsb(ishst);

	__local_flush_tlb_kernel_page(kaddr);
	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);

	if (tlb_flag(TLB_BARRIER)) {
		dsb(ish);
		isb();
	}
}

/*
 * Branch predictor maintenance is paired with full TLB invalidation, so
 * there is no need for any barriers here.
 */
static inline void __local_flush_bp_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_V6_BP))
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
}

static inline void local_flush_bp_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	__local_flush_bp_all();
	if (tlb_flag(TLB_V7_UIS_BP))
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
}

static inline void __flush_bp_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	__local_flush_bp_all();
	if (tlb_flag(TLB_V7_UIS_BP))
		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
}

/*
 *	flush_pmd_entry
 *
 *	Flush a PMD entry (word aligned, or double-word aligned) to
 *	RAM if the TLB for the CPU we are running on requires this.
 *	This is typically used when we are creating PMD entries.
 *
 *	clean_pmd_entry
 *
 *	Clean (but don't drain the write buffer) if the CPU requires
 *	these operations.  This is typically used when we are removing
 *	PMD entries.
 */
static inline void flush_pmd_entry(void *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	tlb_op(TLB_DCLEAN, "c7, c10, 1	@ flush_pmd", pmd);
	tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1  @ L2 flush_pmd", pmd);

	if (tlb_flag(TLB_WB))
		dsb(ishst);
}

static inline void clean_pmd_entry(void *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	tlb_op(TLB_DCLEAN, "c7, c10, 1	@ flush_pmd", pmd);
	tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1  @ L2 flush_pmd", pmd);
}
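
/*
 * Minimal usage sketch (illustration only, not part of the upstream
 * header): page table setup code typically writes the entry and then
 * cleans it so the hardware table walker sees the update, e.g.
 *
 *	*pmdp = __pmd(phys | PMD_TYPE_SECT | prot);
 *	flush_pmd_entry(pmdp);
 *
 * clean_pmd_entry() is the counterpart used when entries are removed and
 * no write buffer drain is needed.
 */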

#undef tlb_op
#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags

/*
 * Convert calls to our calling convention.
 */
#define local_flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
#define local_flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)

#ifndef CONFIG_SMP
#define flush_tlb_all		local_flush_tlb_all
#define flush_tlb_mm		local_flush_tlb_mm
#define flush_tlb_page		local_flush_tlb_page
#define flush_tlb_kernel_page	local_flush_tlb_kernel_page
#define flush_tlb_range		local_flush_tlb_range
#define flush_tlb_kernel_range	local_flush_tlb_kernel_range
#define flush_bp_all		local_flush_bp_all
#else
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void flush_bp_all(void);
#endif

/*
 * If PG_dcache_clean is not set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
 * back to the page. On ARMv6 and later, the cache coherency is handled via
 * the set_pte_at() function.
 */
#if __LINUX_ARM_ARCH__ < 6
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep);
#else
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
}
#endif

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#endif

#elif defined(CONFIG_SMP)	/* !CONFIG_MMU */

#ifndef __ASSEMBLY__
static inline void local_flush_tlb_all(void)									{ }
static inline void local_flush_tlb_mm(struct mm_struct *mm)							{ }
static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)			{ }
static inline void local_flush_tlb_kernel_page(unsigned long kaddr)						{ }
static inline void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)	{ }
static inline void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)				{ }
static inline void local_flush_bp_all(void)									{ }

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void flush_bp_all(void);
#endif	/* __ASSEMBLY__ */

#endif

#ifndef __ASSEMBLY__
#ifdef CONFIG_ARM_ERRATA_798181
extern void erratum_a15_798181_init(void);
#else
static inline void erratum_a15_798181_init(void) {}
#endif
extern bool (*erratum_a15_798181_handler)(void);

static inline bool erratum_a15_798181(void)
{
	if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
		erratum_a15_798181_handler))
		return erratum_a15_798181_handler();
	return false;
}
#endif

#endif